hip_filename     stringlengths 5 to 84
hip_content      stringlengths 79 to 9.69M
cuda_filename    stringlengths 4 to 83
cuda_content     stringlengths 19 to 9.69M
1182655f76271dcf005a3cec1dde4237366af38b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/mean_subtraction.hpp> #include <nbla/cuda/math.hpp> #include <nbla/variable.hpp> namespace nbla { template <typename T> void MeanSubtractionCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(std::stoi(this->ctx_.device_id)); if (this->update_running_mean_) { // Training mode. forward_impl_batch(inputs, outputs); } else { // Testing mode. forward_impl_global(inputs, outputs); } } template <typename T> __global__ void kernel_mean_subtraction_inc_t(T *t, const int max) { if (t[0] < max) { t[0] = t[0] + 1; } } template <typename T> __global__ void kernel_mean_subtraction_forward_batch(const int size1_, const int size0_, const T *x, T *m, T *rm, T *y, const int *t) { NBLA_CUDA_KERNEL_LOOP(i1, size1_) { T coef = 1.0 / ((*t) + 1); // Batch mean T mean = 0; for (int i0 = 0; i0 < size0_; ++i0) { mean += x[i1 + i0 * size1_]; } m[i1] = mean / size0_; // Moving mean rm[i1] = rm[i1] + (m[i1] - rm[i1]) * coef; // Output for (int i0 = 0; i0 < size0_; ++i0) { y[i1 + i0 * size1_] = x[i1 + i0 * size1_] - rm[i1]; } } } template <class T> void MeanSubtractionCuda<T>::forward_impl_batch(const Variables &inputs, const Variables &outputs) { // Inputs const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_); // Output Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true); Variable *batch_mean = &this->mean_; Tc *m = batch_mean->cast_data_and_get_pointer<Tc>(this->ctx_, true); // batch mean // Inputs/Outputs Tc *rm = inputs[1]->cast_data_and_get_pointer<Tc>(this->ctx_); // running mean int *t = inputs[2]->cast_data_and_get_pointer<int>(this->ctx_); // running count NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_mean_subtraction_forward_batch, this->size1_, this->size0_, x, m, rm, y, t); hipLaunchKernelGGL(( kernel_mean_subtraction_inc_t), dim3(1), dim3(1), 0, 0, t, std::numeric_limits<int>::max()); } template <typename T> __global__ void kernel_mean_subtraction_forward_global(const int size1_, const int size0_, const T *x, const T *rm, T *y) { NBLA_CUDA_KERNEL_LOOP(i1, size1_) { for (int i0 = 0; i0 < size0_; ++i0) { y[i1 + i0 * size1_] = x[i1 + i0 * size1_] - rm[i1]; } } } template <class T> void MeanSubtractionCuda<T>::forward_impl_global(const Variables &inputs, const Variables &outputs) { // Inputs const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_); const Tc *rm = inputs[1]->get_data_pointer<Tc>(this->ctx_); // running mean // Output Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_mean_subtraction_forward_global, this->size1_, this->size0_, x, rm, y); } template <typename T> void MeanSubtractionCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> 
&accum) { cuda_set_device(std::stoi(this->ctx_.device_id)); if (this->update_running_mean_) { // Training mode. backward_impl_batch(inputs, outputs, propagate_down, accum); } else { // Testing mode. backward_impl_global(inputs, outputs, propagate_down, accum); } } template <typename T, bool accum> __global__ void kernel_mean_subtraction_backward_batch(const int num, T *dx, const T *dy, const int *t, const int size0_) { const T factor = (T)1.0 / ((*t) * size0_); NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[idx] = (accum ? dx[idx] : (T)0) + dy[idx] * (1 - factor); } } template <class T> void MeanSubtractionCuda<T>::backward_impl_batch( const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!propagate_down[0]) { return; } const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_); Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, !accum[0]); const int *t = inputs[2]->get_data_pointer<int>(this->ctx_); size_t size = inputs[0]->size(); if (accum[0]) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_mean_subtraction_backward_batch<Tc, true>), size, dx, dy, t, this->size0_); } else { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_mean_subtraction_backward_batch<Tc, false>), size, dx, dy, t, this->size0_); } } template <typename T, bool accum> __global__ void kernel_mean_subtraction_backward_global(const int num, T *dx, const T *dy) { NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[idx] = (accum ? dx[idx] : (T)0) + dy[idx]; } } template <class T> void MeanSubtractionCuda<T>::backward_impl_global( const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!propagate_down[0]) { return; } const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_); Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, !accum[0]); size_t size = inputs[0]->size(); if (accum[0]) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_mean_subtraction_backward_global<Tc, true>), size, dx, dy); } else { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_mean_subtraction_backward_global<Tc, false>), size, dx, dy); } } }
1182655f76271dcf005a3cec1dde4237366af38b.cu
// Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/mean_subtraction.hpp> #include <nbla/cuda/math.hpp> #include <nbla/variable.hpp> namespace nbla { template <typename T> void MeanSubtractionCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(std::stoi(this->ctx_.device_id)); if (this->update_running_mean_) { // Training mode. forward_impl_batch(inputs, outputs); } else { // Testing mode. forward_impl_global(inputs, outputs); } } template <typename T> __global__ void kernel_mean_subtraction_inc_t(T *t, const int max) { if (t[0] < max) { t[0] = t[0] + 1; } } template <typename T> __global__ void kernel_mean_subtraction_forward_batch(const int size1_, const int size0_, const T *x, T *m, T *rm, T *y, const int *t) { NBLA_CUDA_KERNEL_LOOP(i1, size1_) { T coef = 1.0 / ((*t) + 1); // Batch mean T mean = 0; for (int i0 = 0; i0 < size0_; ++i0) { mean += x[i1 + i0 * size1_]; } m[i1] = mean / size0_; // Moving mean rm[i1] = rm[i1] + (m[i1] - rm[i1]) * coef; // Output for (int i0 = 0; i0 < size0_; ++i0) { y[i1 + i0 * size1_] = x[i1 + i0 * size1_] - rm[i1]; } } } template <class T> void MeanSubtractionCuda<T>::forward_impl_batch(const Variables &inputs, const Variables &outputs) { // Inputs const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_); // Output Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true); Variable *batch_mean = &this->mean_; Tc *m = batch_mean->cast_data_and_get_pointer<Tc>(this->ctx_, true); // batch mean // Inputs/Outputs Tc *rm = inputs[1]->cast_data_and_get_pointer<Tc>(this->ctx_); // running mean int *t = inputs[2]->cast_data_and_get_pointer<int>(this->ctx_); // running count NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_mean_subtraction_forward_batch, this->size1_, this->size0_, x, m, rm, y, t); kernel_mean_subtraction_inc_t<<<1, 1>>>(t, std::numeric_limits<int>::max()); } template <typename T> __global__ void kernel_mean_subtraction_forward_global(const int size1_, const int size0_, const T *x, const T *rm, T *y) { NBLA_CUDA_KERNEL_LOOP(i1, size1_) { for (int i0 = 0; i0 < size0_; ++i0) { y[i1 + i0 * size1_] = x[i1 + i0 * size1_] - rm[i1]; } } } template <class T> void MeanSubtractionCuda<T>::forward_impl_global(const Variables &inputs, const Variables &outputs) { // Inputs const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_); const Tc *rm = inputs[1]->get_data_pointer<Tc>(this->ctx_); // running mean // Output Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_mean_subtraction_forward_global, this->size1_, this->size0_, x, rm, y); } template <typename T> void MeanSubtractionCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { cuda_set_device(std::stoi(this->ctx_.device_id)); if (this->update_running_mean_) { // Training mode. 
backward_impl_batch(inputs, outputs, propagate_down, accum); } else { // Testing mode. backward_impl_global(inputs, outputs, propagate_down, accum); } } template <typename T, bool accum> __global__ void kernel_mean_subtraction_backward_batch(const int num, T *dx, const T *dy, const int *t, const int size0_) { const T factor = (T)1.0 / ((*t) * size0_); NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[idx] = (accum ? dx[idx] : (T)0) + dy[idx] * (1 - factor); } } template <class T> void MeanSubtractionCuda<T>::backward_impl_batch( const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!propagate_down[0]) { return; } const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_); Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, !accum[0]); const int *t = inputs[2]->get_data_pointer<int>(this->ctx_); size_t size = inputs[0]->size(); if (accum[0]) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_mean_subtraction_backward_batch<Tc, true>), size, dx, dy, t, this->size0_); } else { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_mean_subtraction_backward_batch<Tc, false>), size, dx, dy, t, this->size0_); } } template <typename T, bool accum> __global__ void kernel_mean_subtraction_backward_global(const int num, T *dx, const T *dy) { NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[idx] = (accum ? dx[idx] : (T)0) + dy[idx]; } } template <class T> void MeanSubtractionCuda<T>::backward_impl_global( const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!propagate_down[0]) { return; } const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_); Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, !accum[0]); size_t size = inputs[0]->size(); if (accum[0]) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_mean_subtraction_backward_global<Tc, true>), size, dx, dy); } else { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_mean_subtraction_backward_global<Tc, false>), size, dx, dy); } } }
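Apart from the hipify banner and the hip/hip_runtime.h include, the two versions of this row differ mainly in launch syntax: hipify rewrites CUDA's triple-chevron launch of kernel_mean_subtraction_inc_t into hipLaunchKernelGGL with explicit dim3, shared-memory, and stream arguments. Below is a minimal, self-contained sketch of that mapping; the inc_counter kernel is a hypothetical stand-in for the file's own kernel, not code from the dataset.

```cuda
#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical single-thread kernel standing in for kernel_mean_subtraction_inc_t:
// increment a device-side counter until it saturates at `max`.
__global__ void inc_counter(int *t, int max) {
  if (t[0] < max) { t[0] += 1; }
}

int main() {
  int *t = nullptr;
  cudaMalloc(&t, sizeof(int));
  cudaMemset(t, 0, sizeof(int));

  // CUDA form, as written in the .cu file:
  inc_counter<<<1, 1>>>(t, 100);
  // hipify rewrites the launch above into the HIP form seen in the .hip file:
  //   hipLaunchKernelGGL((inc_counter), dim3(1), dim3(1), 0 /*shared mem*/, 0 /*stream*/, t, 100);

  int host_t = 0;
  cudaMemcpy(&host_t, t, sizeof(int), cudaMemcpyDeviceToHost);
  printf("counter = %d\n", host_t);  // prints 1 after a single launch
  cudaFree(t);
  return 0;
}
```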
7285f5d296d5698b0b4a9ee64eaf782c8f2633c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include "cuda_helper.h" #include "simlc.h" __global__ void sim_lc(float * const front, float * const back, const size_t plates, const size_t mobiles) { int i = X(); if (i >= plates - 1) return; // move mobile phase for (int m = 0; m < mobiles; m++) { int j = m * plates + i; float val = front[j]; front[j] = 0; back[j + 0] += 0.2f * val; back[j + 1] += 0.8f * val; } } __host__ hipError_t invoke_sim_lc( float * const et, const dim3 grid, const dim3 block, float * const front, float * const back, const size_t plates, const size_t mobiles) { hipError_t status; hipEventRecord(start); sim_lc << <grid, block >> > (front, back, plates, mobiles); hipEventRecord(stop); if ((status = hipGetLastError()) != hipSuccess) { fprintf(stderr, "hipGetLastError(): %s\n", hipGetErrorString(status)); return status; } if ((status = hipDeviceSynchronize()) != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize(): %s\n", hipGetErrorString(status)); return status; } hipEventSynchronize(stop); hipEventElapsedTime(et, start, stop); return status; } int main() { const size_t plates = 20000; const size_t mobiles = 1; const size_t io_size = plates * mobiles; float et; dim3 grid(io_size / 32 + 1), block(32); init_buffer(io_size); float *front_dev, *back_dev; if (init_device(&front_dev, &back_dev, front_buf, back_buf, io_size) != hipSuccess) goto exit; while (true) { if ((invoke_sim_lc(&et, grid, block, front_dev, back_dev, plates, mobiles)) != hipSuccess) goto cleanup; if (sync_buffer_to_host(front_buf, back_dev, io_size) != hipSuccess) goto cleanup; SWAP_BUFFER(front_dev, back_dev); printf("elapsed time %f\n", et); //print_buffer(front_buf, plates, mobiles); getchar(); } cleanup: deinit_device(front_dev, back_dev); exit: return 0; }
7285f5d296d5698b0b4a9ee64eaf782c8f2633c4.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include "cuda_helper.h" #include "simlc.h" __global__ void sim_lc(float * const front, float * const back, const size_t plates, const size_t mobiles) { int i = X(); if (i >= plates - 1) return; // move mobile phase for (int m = 0; m < mobiles; m++) { int j = m * plates + i; float val = front[j]; front[j] = 0; back[j + 0] += 0.2f * val; back[j + 1] += 0.8f * val; } } __host__ cudaError_t invoke_sim_lc( float * const et, const dim3 grid, const dim3 block, float * const front, float * const back, const size_t plates, const size_t mobiles) { cudaError_t status; cudaEventRecord(start); sim_lc << <grid, block >> > (front, back, plates, mobiles); cudaEventRecord(stop); if ((status = cudaGetLastError()) != cudaSuccess) { fprintf(stderr, "cudaGetLastError(): %s\n", cudaGetErrorString(status)); return status; } if ((status = cudaDeviceSynchronize()) != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize(): %s\n", cudaGetErrorString(status)); return status; } cudaEventSynchronize(stop); cudaEventElapsedTime(et, start, stop); return status; } int main() { const size_t plates = 20000; const size_t mobiles = 1; const size_t io_size = plates * mobiles; float et; dim3 grid(io_size / 32 + 1), block(32); init_buffer(io_size); float *front_dev, *back_dev; if (init_device(&front_dev, &back_dev, front_buf, back_buf, io_size) != cudaSuccess) goto exit; while (true) { if ((invoke_sim_lc(&et, grid, block, front_dev, back_dev, plates, mobiles)) != cudaSuccess) goto cleanup; if (sync_buffer_to_host(front_buf, back_dev, io_size) != cudaSuccess) goto cleanup; SWAP_BUFFER(front_dev, back_dev); printf("elapsed time %f\n", et); //print_buffer(front_buf, plates, mobiles); getchar(); } cleanup: deinit_device(front_dev, back_dev); exit: return 0; }
ee3316ea51512cae96c4c335aa27d9c911bde502.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include "helpers.cuh" struct Edge { int vertex; struct Edge * next; }; // Inserts Node to the Linked List by Head Insertion - O(1) // Returns address of head which is the newly created node. struct Edge * AddEdge(struct Edge * currentHead, int newVertex){ struct Edge * newHead; hipMallocManaged(&newHead,sizeof(struct Edge)*1); checkCudaError(); newHead->vertex = newVertex; newHead->next = currentHead; return newHead; } __global__ void recursiveTraverse(int index, struct Edge ** adjacencyList,int * level,int * parent,int levC, int * finalLevel){ struct Edge * traverse; traverse = adjacencyList[index]; //next vertex in the next level while (traverse != NULL) { int nextElement = traverse->vertex; if(level[nextElement]==-1 ){ level[nextElement] = levC + 1; parent[nextElement] = index; if(levC+1!=*finalLevel) { hipLaunchKernelGGL(( recursiveTraverse), dim3(1),dim3(1), 0, 0, nextElement,adjacencyList,level,parent,levC+1,finalLevel); hipDeviceSynchronize(); } } traverse= traverse->next; } } __global__ void BreadthFirstSearch( struct Edge ** adjacencyList, int * vertices, int * parent, int * level, int* startVertices,int * lev, int * finalLevel,int count){ int index = startVertices[threadIdx.x]; if(index<count){ hipLaunchKernelGGL(( recursiveTraverse), dim3(1),dim3(1), 0, 0, index,adjacencyList,level,parent,*lev,finalLevel ); hipDeviceSynchronize(); } } int main(){ hipDeviceSetCacheConfig(hipFuncCachePreferL1); //hipDeviceSetCacheConfig(hipFuncCachePreferShared:); //global variable asigning int * vertices; int * edges; int * lev; int * level; int * parent; int * inputsCircuits; int * startArrayCount; int * finalLevel; struct Edge ** adjacencyList; //CPU variables only int v1,v2,i; //int levelSize=2; //unfied memory allocation for int values hipMallocManaged(&vertices,sizeof(int)*1); checkCudaError(); hipMallocManaged(&edges,sizeof(int)*1); checkCudaError(); hipMallocManaged(&lev,sizeof(int)*1); checkCudaError(); hipMallocManaged(&startArrayCount,sizeof(int)*1); checkCudaError(); hipMallocManaged(&finalLevel,sizeof(int)*1); checkCudaError(); //*finalLevel = 2; //scan first line of graph data int noOfRows; FILE * graphFile =fopen("data3/circuitV2.txt","r"); fscanf(graphFile,"%d" ,finalLevel); printf("Total no of levels %d \n", *finalLevel); fscanf(graphFile, "%d %d %d",&noOfRows, vertices, edges); printf("No fo rows %d, No of Cols %d, nnz %d \n",noOfRows,*vertices,*edges); //- done //unified memory allocation for arrays hipMallocManaged(&level,sizeof(int)*(*vertices)); checkCudaError(); hipMallocManaged(&parent,sizeof(int)*(*vertices)); checkCudaError(); hipMallocManaged(&(adjacencyList),sizeof(struct Edge*)*(*vertices)); checkCudaError(); //initialise main arrays for (i = 0; i < *vertices; ++i) { adjacencyList[i] = NULL; parent[i] = 0; level[i] = -1; } //scan rest of the graph and create the adjacency list for (i = 0; i < *edges; ++i) { int val; fscanf(graphFile, "%d %d %d",&v1, &v2, &val); // Adding edge v1 --> v2 adjacencyList[v1] = AddEdge(adjacencyList[v1], v2); } // Printing Adjacency List printf("\nAdjacency List - of graph \n\n"); for (i = 0; i < *vertices; ++i) { printf("adjacencyList[%d] -> ", i); struct Edge * traverse = adjacencyList[i]; while (traverse != NULL) { printf("%d -> ", traverse->vertex); traverse = traverse->next; } printf("NULL\n"); } //scan the input vertices file, here only the input pins vertices are only available FILE * vectorFile= 
fopen("data3/inputV1.txt","r"); fscanf(vectorFile,"%d",startArrayCount); //unified memory allocation for input vertice vector hipMallocManaged(&inputsCircuits,sizeof(int)*(*startArrayCount)); checkCudaError(); //asign values for input vector for(i=0;i<*startArrayCount;i++){ int tempVal; fscanf(vectorFile,"%d",&tempVal); inputsCircuits[i]= tempVal; level[tempVal]=0; } *lev = 0; int count= *startArrayCount; //start Time measurement hipEvent_t start,stop; float elapsedtime; hipEventCreate(&start); hipEventRecord(start,0); //ceil(*vertices/256.0),256 hipLaunchKernelGGL(( BreadthFirstSearch), dim3(ceil(count/256.0)),dim3(256), 0, 0, adjacencyList, vertices, parent, level, inputsCircuits, lev,finalLevel,count); hipDeviceSynchronize(); checkCudaError(); //stop Time measurement hipEventCreate(&stop); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedtime,start,stop); fprintf(stderr,"Time spent for kernel : %.10f seconds\n",elapsedtime/(float)1000); //print the output printf("\nLevel and Parent Arrays -\n"); for (i = 0; i < *vertices; ++i) { printf("Level of Vertex %d is %d, Parent is %d\n", i, level[i], parent[i]); } printf("vertices in level order when traversing :\n"); int b; for(b=0;b<=*finalLevel;b++){ for (i = 0; i < *vertices; ++i) { if(level[i]==b){ printf("%d ,", i); } } printf(" | "); } return 0; }
ee3316ea51512cae96c4c335aa27d9c911bde502.cu
#include <stdio.h> #include <stdlib.h> #include "helpers.cuh" struct Edge { int vertex; struct Edge * next; }; // Inserts Node to the Linked List by Head Insertion - O(1) // Returns address of head which is the newly created node. struct Edge * AddEdge(struct Edge * currentHead, int newVertex){ struct Edge * newHead; cudaMallocManaged(&newHead,sizeof(struct Edge)*1); checkCudaError(); newHead->vertex = newVertex; newHead->next = currentHead; return newHead; } __global__ void recursiveTraverse(int index, struct Edge ** adjacencyList,int * level,int * parent,int levC, int * finalLevel){ struct Edge * traverse; traverse = adjacencyList[index]; //next vertex in the next level while (traverse != NULL) { int nextElement = traverse->vertex; if(level[nextElement]==-1 ){ level[nextElement] = levC + 1; parent[nextElement] = index; if(levC+1!=*finalLevel) { recursiveTraverse<<<1,1>>>(nextElement,adjacencyList,level,parent,levC+1,finalLevel); cudaDeviceSynchronize(); } } traverse= traverse->next; } } __global__ void BreadthFirstSearch( struct Edge ** adjacencyList, int * vertices, int * parent, int * level, int* startVertices,int * lev, int * finalLevel,int count){ int index = startVertices[threadIdx.x]; if(index<count){ recursiveTraverse<<<1,1>>> (index,adjacencyList,level,parent,*lev,finalLevel ); cudaDeviceSynchronize(); } } int main(){ cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); //cudaDeviceSetCacheConfig(cudaFuncCachePreferShared:); //global variable asigning int * vertices; int * edges; int * lev; int * level; int * parent; int * inputsCircuits; int * startArrayCount; int * finalLevel; struct Edge ** adjacencyList; //CPU variables only int v1,v2,i; //int levelSize=2; //unfied memory allocation for int values cudaMallocManaged(&vertices,sizeof(int)*1); checkCudaError(); cudaMallocManaged(&edges,sizeof(int)*1); checkCudaError(); cudaMallocManaged(&lev,sizeof(int)*1); checkCudaError(); cudaMallocManaged(&startArrayCount,sizeof(int)*1); checkCudaError(); cudaMallocManaged(&finalLevel,sizeof(int)*1); checkCudaError(); //*finalLevel = 2; //scan first line of graph data int noOfRows; FILE * graphFile =fopen("data3/circuitV2.txt","r"); fscanf(graphFile,"%d" ,finalLevel); printf("Total no of levels %d \n", *finalLevel); fscanf(graphFile, "%d %d %d",&noOfRows, vertices, edges); printf("No fo rows %d, No of Cols %d, nnz %d \n",noOfRows,*vertices,*edges); //- done //unified memory allocation for arrays cudaMallocManaged(&level,sizeof(int)*(*vertices)); checkCudaError(); cudaMallocManaged(&parent,sizeof(int)*(*vertices)); checkCudaError(); cudaMallocManaged(&(adjacencyList),sizeof(struct Edge*)*(*vertices)); checkCudaError(); //initialise main arrays for (i = 0; i < *vertices; ++i) { adjacencyList[i] = NULL; parent[i] = 0; level[i] = -1; } //scan rest of the graph and create the adjacency list for (i = 0; i < *edges; ++i) { int val; fscanf(graphFile, "%d %d %d",&v1, &v2, &val); // Adding edge v1 --> v2 adjacencyList[v1] = AddEdge(adjacencyList[v1], v2); } // Printing Adjacency List printf("\nAdjacency List - of graph \n\n"); for (i = 0; i < *vertices; ++i) { printf("adjacencyList[%d] -> ", i); struct Edge * traverse = adjacencyList[i]; while (traverse != NULL) { printf("%d -> ", traverse->vertex); traverse = traverse->next; } printf("NULL\n"); } //scan the input vertices file, here only the input pins vertices are only available FILE * vectorFile= fopen("data3/inputV1.txt","r"); fscanf(vectorFile,"%d",startArrayCount); //unified memory allocation for input vertice vector 
cudaMallocManaged(&inputsCircuits,sizeof(int)*(*startArrayCount)); checkCudaError(); //asign values for input vector for(i=0;i<*startArrayCount;i++){ int tempVal; fscanf(vectorFile,"%d",&tempVal); inputsCircuits[i]= tempVal; level[tempVal]=0; } *lev = 0; int count= *startArrayCount; //start Time measurement cudaEvent_t start,stop; float elapsedtime; cudaEventCreate(&start); cudaEventRecord(start,0); //ceil(*vertices/256.0),256 BreadthFirstSearch<<<ceil(count/256.0),256>>>(adjacencyList, vertices, parent, level, inputsCircuits, lev,finalLevel,count); cudaDeviceSynchronize(); checkCudaError(); //stop Time measurement cudaEventCreate(&stop); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedtime,start,stop); fprintf(stderr,"Time spent for kernel : %.10f seconds\n",elapsedtime/(float)1000); //print the output printf("\nLevel and Parent Arrays -\n"); for (i = 0; i < *vertices; ++i) { printf("Level of Vertex %d is %d, Parent is %d\n", i, level[i], parent[i]); } printf("vertices in level order when traversing :\n"); int b; for(b=0;b<=*finalLevel;b++){ for (i = 0; i < *vertices; ++i) { if(level[i]==b){ printf("%d ,", i); } } printf(" | "); } return 0; }
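This row leans on two CUDA features worth calling out: unified memory (cudaMallocManaged builds the adjacency list so host and device can both walk it) and dynamic parallelism (BreadthFirstSearch launches recursiveTraverse from device code). A stripped-down sketch of that parent/child launch pattern follows; the parent and child kernels here are hypothetical simplifications, and any dynamic-parallelism build needs relocatable device code (for example nvcc -arch=sm_60 -rdc=true).

```cuda
#include <cstdio>
#include <cuda_runtime.h>

// Child kernel: mark one vertex. The real kernel walks an adjacency list instead.
__global__ void child(int *level, int v) {
  level[v] = 1;
}

// Parent kernel: one device-side launch per thread, the same pattern
// BreadthFirstSearch uses to start recursiveTraverse for each start vertex.
__global__ void parent(int *level, int n) {
  int v = threadIdx.x;
  if (v < n) {
    child<<<1, 1>>>(level, v);
    // Note: calling cudaDeviceSynchronize() from device code, as the original does,
    // is deprecated in recent CUDA toolkits; child grids are still guaranteed to
    // finish before the parent grid completes.
  }
}

int main() {
  const int n = 8;
  int *level = nullptr;
  cudaMallocManaged(&level, n * sizeof(int));  // unified memory, as in the original
  for (int i = 0; i < n; ++i) level[i] = -1;

  parent<<<1, n>>>(level, n);
  cudaDeviceSynchronize();  // host-side sync before reading managed memory

  for (int i = 0; i < n; ++i) printf("level[%d] = %d\n", i, level[i]);
  cudaFree(level);
  return 0;
}
```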
76dc88e4c12b92f5c64f0cbc19d165abc41df10c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/convert/convert_fixed_point.hpp> #include <cudf/strings/detail/converters.hpp> #include <cudf/strings/detail/utilities.cuh> #include <cudf/strings/detail/utilities.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <strings/convert/utilities.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/optional.h> #include <thrust/transform.h> namespace cudf { namespace strings { namespace detail { namespace { struct string_to_decimal_base { /** * @brief Return the integer component of a decimal string. * * This is reads everything up to the exponent 'e' notation. * The return includes the integer digits and any exponent offset. * * @param[in,out] iter Start of characters to parse * @param[in] end End of characters to parse * @return Integer component and exponent offset. */ __device__ thrust::pair<uint64_t, int32_t> parse_integer(char const*& iter, char const* iter_end) const { // highest value where another decimal digit cannot be appended without an overflow; // this preserves the most digits when scaling the final result constexpr uint64_t decimal_max = (std::numeric_limits<uint64_t>::max() - 9L) / 10L; uint64_t value = 0; // for checking overflow int32_t exp_offset = 0; bool decimal_found = false; while (iter < iter_end) { auto const ch = *iter++; if (ch == '.' && !decimal_found) { decimal_found = true; continue; } if (ch < '0' || ch > '9') { --iter; break; } if (value > decimal_max) { exp_offset += static_cast<int32_t>(!decimal_found); } else { value = (value * 10) + static_cast<uint64_t>(ch - '0'); exp_offset -= static_cast<int32_t>(decimal_found); } } return {value, exp_offset}; } /** * @brief Return the exponent of a decimal string. * * This should only be called after the exponent 'e' notation was detected. * The return is the exponent (base-10) integer and can only be * invalid if `check_only == true` and invalid characters are found or the * exponent overflows an int32. 
* * @tparam check_only Set to true to verify the characters are valid and the * exponent value in the decimal string does not overflow int32 * @param[in,out] iter Start of characters to parse * (points to the character after the 'E' or 'e') * @param[in] end End of characters to parse * @return Integer value of the exponent */ template <bool check_only = false> __device__ thrust::optional<int32_t> parse_exponent(char const* iter, char const* iter_end) const { constexpr uint32_t exponent_max = static_cast<uint32_t>(std::numeric_limits<int32_t>::max()); // get optional exponent sign int32_t const exp_sign = [&iter] { auto const ch = *iter; if (ch != '-' && ch != '+') { return 1; } ++iter; return (ch == '-' ? -1 : 1); }(); // parse exponent integer int32_t exp_ten = 0; while (iter < iter_end) { auto const ch = *iter++; if (ch < '0' || ch > '9') { if (check_only) { return thrust::nullopt; } break; } uint32_t exp_check = static_cast<uint32_t>(exp_ten * 10) + static_cast<uint32_t>(ch - '0'); if (check_only && (exp_check > exponent_max)) { return thrust::nullopt; } // check overflow exp_ten = static_cast<int32_t>(exp_check); } return exp_ten * exp_sign; } }; /** * @brief Converts strings into an integers and records decimal places. * * The conversion uses the provided scale to build the resulting * integer. This can prevent overflow for strings with many digits. */ template <typename DecimalType> struct string_to_decimal_fn : string_to_decimal_base { column_device_view const d_strings; int32_t const scale; string_to_decimal_fn(column_device_view const& d_strings, int32_t scale) : d_strings(d_strings), scale(scale) { } __device__ DecimalType operator()(size_type idx) const { if (d_strings.is_null(idx)) { return 0; } auto const d_str = d_strings.element<string_view>(idx); if (d_str.empty()) { return 0; } auto const sign = [&] { if (d_str.data()[0] == '-') { return -1; } if (d_str.data()[0] == '+') { return 1; } return 0; }(); auto iter = d_str.data() + (sign != 0); auto const iter_end = d_str.data() + d_str.size_bytes(); auto [value, exp_offset] = parse_integer(iter, iter_end); if (value == 0) { return DecimalType{0}; } // check for exponent int32_t exp_ten = 0; if ((iter < iter_end) && (*iter == 'e' || *iter == 'E')) { ++iter; if (iter < iter_end) { exp_ten = parse_exponent<false>(iter, iter_end).value(); } } exp_ten += exp_offset; // shift the output value based on the exp_ten and the scale values if (exp_ten < scale) { value = value / static_cast<uint64_t>(exp10(static_cast<double>(scale - exp_ten))); } else { value = value * static_cast<uint64_t>(exp10(static_cast<double>(exp_ten - scale))); } return static_cast<DecimalType>(value) * (sign == 0 ? 1 : sign); } }; /** * @brief This only checks the string format for valid decimal characters. * * This follows closely the logic above but just ensures there are valid * characters for conversion and the integer component does not overflow. 
*/ template <typename DecimalType> struct string_to_decimal_check_fn : string_to_decimal_base { column_device_view const d_strings; int32_t const scale; string_to_decimal_check_fn(column_device_view const& d_strings, int32_t scale) : d_strings(d_strings), scale(scale) { } __device__ bool operator()(size_type idx) const { if (d_strings.is_null(idx)) { return false; } auto const d_str = d_strings.element<string_view>(idx); if (d_str.empty()) { return false; } auto iter = d_str.data() + static_cast<int>((d_str.data()[0] == '-' || d_str.data()[0] == '+')); auto const iter_end = d_str.data() + d_str.size_bytes(); auto [value, exp_offset] = parse_integer(iter, iter_end); // only exponent notation is expected here if ((iter < iter_end) && (*iter != 'e' && *iter != 'E')) { return false; } ++iter; int32_t exp_ten = 0; // check exponent overflow if (iter < iter_end) { auto exp_result = parse_exponent<true>(iter, iter_end); if (!exp_result) { return false; } exp_ten = exp_result.value(); } exp_ten += exp_offset; // finally, check for overflow based on the exp_ten and scale values return (exp_ten < scale) ? true : value <= static_cast<uint64_t>( std::numeric_limits<DecimalType>::max() / static_cast<DecimalType>(exp10(static_cast<double>(exp_ten - scale)))); } }; /** * @brief The dispatch function for converting strings column to fixed-point column. */ struct dispatch_to_fixed_point_fn { template <typename T, std::enable_if_t<cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(strings_column_view const& input, data_type output_type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { using DecimalType = device_storage_type_t<T>; auto const d_column = column_device_view::create(input.parent(), stream); // create output column auto results = make_fixed_point_column(output_type, input.size(), cudf::detail::copy_bitmask(input.parent(), stream, mr), input.null_count(), stream, mr); auto d_results = results->mutable_view().data<DecimalType>(); // convert strings into decimal values thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(input.size()), d_results, string_to_decimal_fn<DecimalType>{*d_column, output_type.scale()}); results->set_null_count(input.null_count()); return results; } template <typename T, std::enable_if_t<not cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(strings_column_view const&, data_type, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) const { CUDF_FAIL("Output for to_fixed_point must be a decimal type."); } }; } // namespace // This will convert a strings column into any integer column type. std::unique_ptr<column> to_fixed_point(strings_column_view const& input, data_type output_type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (input.is_empty()) return make_empty_column(output_type); return type_dispatcher(output_type, dispatch_to_fixed_point_fn{}, input, output_type, stream, mr); } } // namespace detail // external API std::unique_ptr<column> to_fixed_point(strings_column_view const& strings, data_type output_type, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::to_fixed_point(strings, output_type, rmm::cuda_stream_default, mr); } namespace detail { namespace { /** * @brief Calculate the size of the each string required for * converting each value in base-10 format. 
* * ouput format is [-]integer.fraction */ template <typename DecimalType> struct decimal_to_string_size_fn { column_device_view const d_column; __device__ int32_t operator()(size_type idx) const { if (d_column.is_null(idx)) return 0; auto const value = d_column.element<DecimalType>(idx); auto const scale = d_column.type().scale(); if (scale >= 0) return count_digits(value) + scale; auto const abs_value = std::abs(value); auto const exp_ten = static_cast<int64_t>(exp10(static_cast<double>(-scale))); auto const fraction = count_digits(abs_value % exp_ten); auto const num_zeros = ::max(0, (-scale - fraction)); return static_cast<int32_t>(value < 0) + // sign if negative count_digits(abs_value / exp_ten) + // integer 1 + // decimal point num_zeros + // zeros padding fraction; // size of fraction } }; /** * @brief Convert each value into a string. * * The value is converted into base-10 digits [0-9] * plus the decimal point and a negative sign prefix. */ template <typename DecimalType> struct decimal_to_string_fn { column_device_view const d_column; int32_t const* d_offsets; char* d_chars; __device__ void operator()(size_type idx) { if (d_column.is_null(idx)) return; auto const value = d_column.element<DecimalType>(idx); auto const scale = d_column.type().scale(); char* d_buffer = d_chars + d_offsets[idx]; if (scale >= 0) { d_buffer += integer_to_string(value, d_buffer); thrust::generate_n(thrust::seq, d_buffer, scale, []() { return '0'; }); // add zeros return; } // scale < 0 // write format: [-]integer.fraction // where integer = abs(value) / (10^abs(scale)) // fraction = abs(value) % (10^abs(scale)) auto const abs_value = std::abs(value); if (value < 0) *d_buffer++ = '-'; // add sign auto const exp_ten = static_cast<int64_t>(exp10(static_cast<double>(-scale))); auto const num_zeros = ::max(0, (-scale - count_digits(abs_value % exp_ten))); d_buffer += integer_to_string(abs_value / exp_ten, d_buffer); // add the integer part *d_buffer++ = '.'; // add decimal point thrust::generate_n(thrust::seq, d_buffer, num_zeros, []() { return '0'; }); // add zeros d_buffer += num_zeros; integer_to_string(abs_value % exp_ten, d_buffer); // add the fraction part } }; /** * @brief The dispatcher functor for converting fixed-point values into strings. 
*/ struct dispatch_from_fixed_point_fn { template <typename T, std::enable_if_t<cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { using DecimalType = device_storage_type_t<T>; // underlying value type auto const d_column = column_device_view::create(input, stream); // build offsets column auto offsets_transformer_itr = cudf::detail::make_counting_transform_iterator( 0, decimal_to_string_size_fn<DecimalType>{*d_column}); auto offsets_column = detail::make_offsets_child_column( offsets_transformer_itr, offsets_transformer_itr + input.size(), stream, mr); auto const d_offsets = offsets_column->view().template data<int32_t>(); // build chars column auto const bytes = cudf::detail::get_value<int32_t>(offsets_column->view(), input.size(), stream); auto chars_column = detail::create_chars_child_column(input.size(), bytes, stream, mr); auto d_chars = chars_column->mutable_view().template data<char>(); thrust::for_each_n(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), input.size(), decimal_to_string_fn<DecimalType>{*d_column, d_offsets, d_chars}); return make_strings_column(input.size(), std::move(offsets_column), std::move(chars_column), input.null_count(), cudf::detail::copy_bitmask(input, stream, mr), stream, mr); } template <typename T, std::enable_if_t<not cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(column_view const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) const { CUDF_FAIL("Values for from_fixed_point function must be a decimal type."); } }; } // namespace std::unique_ptr<column> from_fixed_point(column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (input.is_empty()) return detail::make_empty_strings_column(stream, mr); return type_dispatcher(input.type(), dispatch_from_fixed_point_fn{}, input, stream, mr); } } // namespace detail // external API std::unique_ptr<column> from_fixed_point(column_view const& input, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::from_fixed_point(input, rmm::cuda_stream_default, mr); } namespace detail { namespace { struct dispatch_is_fixed_point_fn { template <typename T, std::enable_if_t<cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(strings_column_view const& input, data_type decimal_type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { using DecimalType = device_storage_type_t<T>; auto const d_column = column_device_view::create(input.parent(), stream); // create output column auto results = make_numeric_column(data_type{type_id::BOOL8}, input.size(), cudf::detail::copy_bitmask(input.parent(), stream, mr), input.null_count(), stream, mr); auto d_results = results->mutable_view().data<bool>(); // check strings for valid fixed-point chars thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(input.size()), d_results, string_to_decimal_check_fn<DecimalType>{*d_column, decimal_type.scale()}); results->set_null_count(input.null_count()); return results; } template <typename T, std::enable_if_t<not cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(strings_column_view const&, data_type, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) const { CUDF_FAIL("is_fixed_point is expecting a decimal type"); } }; } // namespace std::unique_ptr<column> 
is_fixed_point(strings_column_view const& input, data_type decimal_type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (input.is_empty()) return cudf::make_empty_column(data_type{type_id::BOOL8}); return type_dispatcher( decimal_type, dispatch_is_fixed_point_fn{}, input, decimal_type, stream, mr); } } // namespace detail std::unique_ptr<column> is_fixed_point(strings_column_view const& input, data_type decimal_type, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::is_fixed_point(input, decimal_type, rmm::cuda_stream_default, mr); } } // namespace strings } // namespace cudf
76dc88e4c12b92f5c64f0cbc19d165abc41df10c.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/convert/convert_fixed_point.hpp> #include <cudf/strings/detail/converters.hpp> #include <cudf/strings/detail/utilities.cuh> #include <cudf/strings/detail/utilities.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <strings/convert/utilities.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/optional.h> #include <thrust/transform.h> namespace cudf { namespace strings { namespace detail { namespace { struct string_to_decimal_base { /** * @brief Return the integer component of a decimal string. * * This is reads everything up to the exponent 'e' notation. * The return includes the integer digits and any exponent offset. * * @param[in,out] iter Start of characters to parse * @param[in] end End of characters to parse * @return Integer component and exponent offset. */ __device__ thrust::pair<uint64_t, int32_t> parse_integer(char const*& iter, char const* iter_end) const { // highest value where another decimal digit cannot be appended without an overflow; // this preserves the most digits when scaling the final result constexpr uint64_t decimal_max = (std::numeric_limits<uint64_t>::max() - 9L) / 10L; uint64_t value = 0; // for checking overflow int32_t exp_offset = 0; bool decimal_found = false; while (iter < iter_end) { auto const ch = *iter++; if (ch == '.' && !decimal_found) { decimal_found = true; continue; } if (ch < '0' || ch > '9') { --iter; break; } if (value > decimal_max) { exp_offset += static_cast<int32_t>(!decimal_found); } else { value = (value * 10) + static_cast<uint64_t>(ch - '0'); exp_offset -= static_cast<int32_t>(decimal_found); } } return {value, exp_offset}; } /** * @brief Return the exponent of a decimal string. * * This should only be called after the exponent 'e' notation was detected. * The return is the exponent (base-10) integer and can only be * invalid if `check_only == true` and invalid characters are found or the * exponent overflows an int32. 
* * @tparam check_only Set to true to verify the characters are valid and the * exponent value in the decimal string does not overflow int32 * @param[in,out] iter Start of characters to parse * (points to the character after the 'E' or 'e') * @param[in] end End of characters to parse * @return Integer value of the exponent */ template <bool check_only = false> __device__ thrust::optional<int32_t> parse_exponent(char const* iter, char const* iter_end) const { constexpr uint32_t exponent_max = static_cast<uint32_t>(std::numeric_limits<int32_t>::max()); // get optional exponent sign int32_t const exp_sign = [&iter] { auto const ch = *iter; if (ch != '-' && ch != '+') { return 1; } ++iter; return (ch == '-' ? -1 : 1); }(); // parse exponent integer int32_t exp_ten = 0; while (iter < iter_end) { auto const ch = *iter++; if (ch < '0' || ch > '9') { if (check_only) { return thrust::nullopt; } break; } uint32_t exp_check = static_cast<uint32_t>(exp_ten * 10) + static_cast<uint32_t>(ch - '0'); if (check_only && (exp_check > exponent_max)) { return thrust::nullopt; } // check overflow exp_ten = static_cast<int32_t>(exp_check); } return exp_ten * exp_sign; } }; /** * @brief Converts strings into an integers and records decimal places. * * The conversion uses the provided scale to build the resulting * integer. This can prevent overflow for strings with many digits. */ template <typename DecimalType> struct string_to_decimal_fn : string_to_decimal_base { column_device_view const d_strings; int32_t const scale; string_to_decimal_fn(column_device_view const& d_strings, int32_t scale) : d_strings(d_strings), scale(scale) { } __device__ DecimalType operator()(size_type idx) const { if (d_strings.is_null(idx)) { return 0; } auto const d_str = d_strings.element<string_view>(idx); if (d_str.empty()) { return 0; } auto const sign = [&] { if (d_str.data()[0] == '-') { return -1; } if (d_str.data()[0] == '+') { return 1; } return 0; }(); auto iter = d_str.data() + (sign != 0); auto const iter_end = d_str.data() + d_str.size_bytes(); auto [value, exp_offset] = parse_integer(iter, iter_end); if (value == 0) { return DecimalType{0}; } // check for exponent int32_t exp_ten = 0; if ((iter < iter_end) && (*iter == 'e' || *iter == 'E')) { ++iter; if (iter < iter_end) { exp_ten = parse_exponent<false>(iter, iter_end).value(); } } exp_ten += exp_offset; // shift the output value based on the exp_ten and the scale values if (exp_ten < scale) { value = value / static_cast<uint64_t>(exp10(static_cast<double>(scale - exp_ten))); } else { value = value * static_cast<uint64_t>(exp10(static_cast<double>(exp_ten - scale))); } return static_cast<DecimalType>(value) * (sign == 0 ? 1 : sign); } }; /** * @brief This only checks the string format for valid decimal characters. * * This follows closely the logic above but just ensures there are valid * characters for conversion and the integer component does not overflow. 
*/ template <typename DecimalType> struct string_to_decimal_check_fn : string_to_decimal_base { column_device_view const d_strings; int32_t const scale; string_to_decimal_check_fn(column_device_view const& d_strings, int32_t scale) : d_strings(d_strings), scale(scale) { } __device__ bool operator()(size_type idx) const { if (d_strings.is_null(idx)) { return false; } auto const d_str = d_strings.element<string_view>(idx); if (d_str.empty()) { return false; } auto iter = d_str.data() + static_cast<int>((d_str.data()[0] == '-' || d_str.data()[0] == '+')); auto const iter_end = d_str.data() + d_str.size_bytes(); auto [value, exp_offset] = parse_integer(iter, iter_end); // only exponent notation is expected here if ((iter < iter_end) && (*iter != 'e' && *iter != 'E')) { return false; } ++iter; int32_t exp_ten = 0; // check exponent overflow if (iter < iter_end) { auto exp_result = parse_exponent<true>(iter, iter_end); if (!exp_result) { return false; } exp_ten = exp_result.value(); } exp_ten += exp_offset; // finally, check for overflow based on the exp_ten and scale values return (exp_ten < scale) ? true : value <= static_cast<uint64_t>( std::numeric_limits<DecimalType>::max() / static_cast<DecimalType>(exp10(static_cast<double>(exp_ten - scale)))); } }; /** * @brief The dispatch function for converting strings column to fixed-point column. */ struct dispatch_to_fixed_point_fn { template <typename T, std::enable_if_t<cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(strings_column_view const& input, data_type output_type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { using DecimalType = device_storage_type_t<T>; auto const d_column = column_device_view::create(input.parent(), stream); // create output column auto results = make_fixed_point_column(output_type, input.size(), cudf::detail::copy_bitmask(input.parent(), stream, mr), input.null_count(), stream, mr); auto d_results = results->mutable_view().data<DecimalType>(); // convert strings into decimal values thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(input.size()), d_results, string_to_decimal_fn<DecimalType>{*d_column, output_type.scale()}); results->set_null_count(input.null_count()); return results; } template <typename T, std::enable_if_t<not cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(strings_column_view const&, data_type, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) const { CUDF_FAIL("Output for to_fixed_point must be a decimal type."); } }; } // namespace // This will convert a strings column into any integer column type. std::unique_ptr<column> to_fixed_point(strings_column_view const& input, data_type output_type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (input.is_empty()) return make_empty_column(output_type); return type_dispatcher(output_type, dispatch_to_fixed_point_fn{}, input, output_type, stream, mr); } } // namespace detail // external API std::unique_ptr<column> to_fixed_point(strings_column_view const& strings, data_type output_type, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::to_fixed_point(strings, output_type, rmm::cuda_stream_default, mr); } namespace detail { namespace { /** * @brief Calculate the size of the each string required for * converting each value in base-10 format. 
* * ouput format is [-]integer.fraction */ template <typename DecimalType> struct decimal_to_string_size_fn { column_device_view const d_column; __device__ int32_t operator()(size_type idx) const { if (d_column.is_null(idx)) return 0; auto const value = d_column.element<DecimalType>(idx); auto const scale = d_column.type().scale(); if (scale >= 0) return count_digits(value) + scale; auto const abs_value = std::abs(value); auto const exp_ten = static_cast<int64_t>(exp10(static_cast<double>(-scale))); auto const fraction = count_digits(abs_value % exp_ten); auto const num_zeros = std::max(0, (-scale - fraction)); return static_cast<int32_t>(value < 0) + // sign if negative count_digits(abs_value / exp_ten) + // integer 1 + // decimal point num_zeros + // zeros padding fraction; // size of fraction } }; /** * @brief Convert each value into a string. * * The value is converted into base-10 digits [0-9] * plus the decimal point and a negative sign prefix. */ template <typename DecimalType> struct decimal_to_string_fn { column_device_view const d_column; int32_t const* d_offsets; char* d_chars; __device__ void operator()(size_type idx) { if (d_column.is_null(idx)) return; auto const value = d_column.element<DecimalType>(idx); auto const scale = d_column.type().scale(); char* d_buffer = d_chars + d_offsets[idx]; if (scale >= 0) { d_buffer += integer_to_string(value, d_buffer); thrust::generate_n(thrust::seq, d_buffer, scale, []() { return '0'; }); // add zeros return; } // scale < 0 // write format: [-]integer.fraction // where integer = abs(value) / (10^abs(scale)) // fraction = abs(value) % (10^abs(scale)) auto const abs_value = std::abs(value); if (value < 0) *d_buffer++ = '-'; // add sign auto const exp_ten = static_cast<int64_t>(exp10(static_cast<double>(-scale))); auto const num_zeros = std::max(0, (-scale - count_digits(abs_value % exp_ten))); d_buffer += integer_to_string(abs_value / exp_ten, d_buffer); // add the integer part *d_buffer++ = '.'; // add decimal point thrust::generate_n(thrust::seq, d_buffer, num_zeros, []() { return '0'; }); // add zeros d_buffer += num_zeros; integer_to_string(abs_value % exp_ten, d_buffer); // add the fraction part } }; /** * @brief The dispatcher functor for converting fixed-point values into strings. 
*/ struct dispatch_from_fixed_point_fn { template <typename T, std::enable_if_t<cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { using DecimalType = device_storage_type_t<T>; // underlying value type auto const d_column = column_device_view::create(input, stream); // build offsets column auto offsets_transformer_itr = cudf::detail::make_counting_transform_iterator( 0, decimal_to_string_size_fn<DecimalType>{*d_column}); auto offsets_column = detail::make_offsets_child_column( offsets_transformer_itr, offsets_transformer_itr + input.size(), stream, mr); auto const d_offsets = offsets_column->view().template data<int32_t>(); // build chars column auto const bytes = cudf::detail::get_value<int32_t>(offsets_column->view(), input.size(), stream); auto chars_column = detail::create_chars_child_column(input.size(), bytes, stream, mr); auto d_chars = chars_column->mutable_view().template data<char>(); thrust::for_each_n(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), input.size(), decimal_to_string_fn<DecimalType>{*d_column, d_offsets, d_chars}); return make_strings_column(input.size(), std::move(offsets_column), std::move(chars_column), input.null_count(), cudf::detail::copy_bitmask(input, stream, mr), stream, mr); } template <typename T, std::enable_if_t<not cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(column_view const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) const { CUDF_FAIL("Values for from_fixed_point function must be a decimal type."); } }; } // namespace std::unique_ptr<column> from_fixed_point(column_view const& input, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (input.is_empty()) return detail::make_empty_strings_column(stream, mr); return type_dispatcher(input.type(), dispatch_from_fixed_point_fn{}, input, stream, mr); } } // namespace detail // external API std::unique_ptr<column> from_fixed_point(column_view const& input, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::from_fixed_point(input, rmm::cuda_stream_default, mr); } namespace detail { namespace { struct dispatch_is_fixed_point_fn { template <typename T, std::enable_if_t<cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(strings_column_view const& input, data_type decimal_type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) const { using DecimalType = device_storage_type_t<T>; auto const d_column = column_device_view::create(input.parent(), stream); // create output column auto results = make_numeric_column(data_type{type_id::BOOL8}, input.size(), cudf::detail::copy_bitmask(input.parent(), stream, mr), input.null_count(), stream, mr); auto d_results = results->mutable_view().data<bool>(); // check strings for valid fixed-point chars thrust::transform(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(input.size()), d_results, string_to_decimal_check_fn<DecimalType>{*d_column, decimal_type.scale()}); results->set_null_count(input.null_count()); return results; } template <typename T, std::enable_if_t<not cudf::is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> operator()(strings_column_view const&, data_type, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) const { CUDF_FAIL("is_fixed_point is expecting a decimal type"); } }; } // namespace std::unique_ptr<column> 
is_fixed_point(strings_column_view const& input, data_type decimal_type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (input.is_empty()) return cudf::make_empty_column(data_type{type_id::BOOL8}); return type_dispatcher( decimal_type, dispatch_is_fixed_point_fn{}, input, decimal_type, stream, mr); } } // namespace detail std::unique_ptr<column> is_fixed_point(strings_column_view const& input, data_type decimal_type, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::is_fixed_point(input, decimal_type, rmm::cuda_stream_default, mr); } } // namespace strings } // namespace cudf
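The core of the string-to-decimal conversion in this row is the rescaling step at the end of string_to_decimal_fn::operator(): the digits are accumulated into an integer while the decimal point and exponent are folded into exp_ten, and the result is then shifted to the column's scale. Below is a small host-side worked example of that arithmetic for the input "1.25e2" with scale -1; it uses std::pow instead of the device-side exp10, and the variable names simply mirror the functor's.

```cuda
#include <cstdint>
#include <cstdio>
#include <cmath>

// Worked example of the scaling step in string_to_decimal_fn for the string "1.25e2".
// parse_integer yields value = 125 and exp_offset = -2 (two digits after the '.'),
// and parse_exponent yields +2 from the "e2" suffix.
int main() {
  uint64_t value = 125;
  int32_t exp_offset = -2;
  int32_t exp_ten = 2;
  int32_t scale = -1;     // target fixed-point scale: one fractional digit kept

  exp_ten += exp_offset;  // net base-10 exponent of the digit string: 0

  // Shift toward the requested scale, mirroring the device functor
  // (which uses exp10; std::pow is the portable host equivalent).
  if (exp_ten < scale) {
    value = value / static_cast<uint64_t>(std::pow(10.0, static_cast<double>(scale - exp_ten)));
  } else {
    value = value * static_cast<uint64_t>(std::pow(10.0, static_cast<double>(exp_ten - scale)));
  }

  // 1.25e2 equals 125.0, stored at scale -1 as the integer 1250 (1250 * 10^-1).
  printf("stored value = %llu\n", static_cast<unsigned long long>(value));
  return 0;
}
```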
00e53efc93c6b3ed60ceccd59858790809dd852a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/InitialTensorOptions.h> #include <ATen/NativeFunctions.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/native/TensorFactories.h> #include <ATen/native/hip/Resize.cuh> #include <c10/util/Exception.h> #include <THH/THHGeneral.h> #include <THH/THHThrustAllocator.cuh> #include <thrust/device_ptr.h> #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/sequence.h> #include <algorithm> #include <cstddef> #include <cmath> #include <ATen/native/hip/arc_flag.h> namespace at { namespace native { Tensor& eye_out_cuda(Tensor& result, int64_t n) { return at::native::eye_out_cuda(result, n, /*m=*/-1); } Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) { TORCH_CHECK(n >= 0, "n must be greater or equal to 0, got ", n); if(m < 0) { m = n; } result.resize_({n, m}); result.zero_(); int64_t sz = std::min<int64_t>(n, m); int64_t stride = result.stride(0) + result.stride(1); Tensor diag = result.as_strided({sz}, {stride}); diag.fill_(1); return result; } Tensor empty_cuda(IntArrayRef size, const TensorOptions& options, c10::optional<MemoryFormat> optional_memory_format) { AT_ASSERT(options.backend() == at::Backend::CUDA); AT_ASSERT(!options.is_variable()); // is_variable should have been 'unpacked' // TODO: remove this when Variable and Tensor are merged TORCH_CHECK(!options.pinned_memory(), "Only dense CPU tensors can be pinned"); check_size_nonnegative(size); auto* allocator = at::cuda::getCUDADeviceAllocator(); int64_t nelements = prod_intlist(size); auto dtype = options.dtype(); auto storage_impl = c10::make_intrusive<StorageImpl>( dtype, nelements, allocator->allocate(nelements * dtype.itemsize()), allocator, /*resizeable=*/true); auto tensor = at::detail::make_tensor<TensorImpl>(storage_impl, CUDATensorId()); // Default TensorImpl has size [0] if (size.size() != 1 || size[0] != 0) { tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size); } auto memory_format = optional_memory_format.value_or(MemoryFormat::Contiguous); tensor.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); return tensor; } Tensor ARCempty_cuda(IntArrayRef size, const TensorOptions& options, c10::optional<MemoryFormat> optional_memory_format) { AT_ASSERT(options.backend() == at::Backend::CUDA); AT_ASSERT(!options.is_variable()); // is_variable should have been 'unpacked' // TODO: remove this when Variable and Tensor are merged TORCH_CHECK(!options.pinned_memory(), "Only dense CPU tensors can be pinned"); check_size_nonnegative(size); auto* allocator = at::cuda::getCUDADeviceAllocator(); int64_t nelements = prod_intlist(size); auto dtype = options.dtype(); auto storage_impl = c10::make_intrusive<StorageImpl>( dtype, nelements, // allocator->allocate(nelements * dtype.itemsize()), allocator->ARCallocate(nelements * dtype.itemsize()), allocator, /*resizeable=*/true); auto tensor = at::detail::make_tensor<TensorImpl>(storage_impl, CUDATensorId()); // Default TensorImpl has size [0] if (size.size() != 1 || size[0] != 0) { tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size); } auto memory_format = optional_memory_format.value_or(MemoryFormat::Contiguous); tensor.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); return tensor; } Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, const TensorOptions& options) { int newTid = ++arc_vm.global_tensor_id_; auto t = at::native::empty_cuda({0}, options); // 
std::cout << "empty_strided t newTid: " << newTid << std::endl; t.unsafeGetTensorImpl()->tensor_id = newTid; at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride); return t; } Tensor& randperm_out_cuda(Tensor& result, int64_t n, Generator* generator) { TORCH_CHECK(n >= 0, "n must be non-negative, got", n); check_supported_max_int_with_precision(n, result); result.resize_({n}); if (n < 30000) { // For small inputs, we offload it to CPU instead. auto result_cpu = at::empty({n}, result.options().device(kCPU)); randperm_out(result_cpu, n, generator); return result.copy_(result_cpu); } #if 0 // This if condition should never be true because if n >= 30000 and the tensor has a Half type, // check_supported_max_int_with_precision should have reported an error. This snippet is commented out but left here // for the sake of clarity, because Half in thrust is spotty, and we do not want future change unaware of this. if (result.scalar_type() == at::ScalarType::Half) { // Half in thrust is spotty. Avoid. auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA))); return result.copy_(randperm_out_cuda(result_float, n, generator)); } #endif // Generate random values for the keys array AT_DISPATCH_ALL_TYPES( result.scalar_type(), "randperm_out_cuda", [&] { auto keys = at::empty(result.sizes(), result.options()).random_(generator); auto result_data = thrust::device_ptr<scalar_t>(result.data<scalar_t>()); auto keys_data = thrust::device_ptr<scalar_t>(keys.data<scalar_t>()); auto state = globalContext().getTHCState(); THCThrustAllocator thrustAlloc(state); auto policy = thrust::hip::par(thrustAlloc).on(at::hip::getCurrentHIPStreamMasqueradingAsCUDA()); thrust::sequence(policy, result_data, result_data + n); // Use the sorted order of keys to rearrange the result array thrust::sort_by_key(policy, keys_data, keys_data + n, result_data); } ); return result; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ namespace { // To find the max integer that does not exceed the root of an int64_t variable, // we could use a loop to test one bit at a time, which takes up to 31 // iterations. This would give the accurate result, but is relatively slow and // is an overkill for most cases where double's precision suffice. // // If we directly use sqrt to calculate the root, the convertion from int64_t // to double would lose 11 bits precision. // // The following solution uses sqrt directly for most cases, and would only // special handle it if there is indeed precision loss. __device__ inline int64_t resolve_root_int( int64_t b, int64_t cX4, int64_t x, int32_t sign) { int64_t bXb_cX4 = b*b - cX4; // potential precision loss could occur here when casting int64_t (63 bits // precision) to double (52 bits precision) double sr = ::sqrt((double)bXb_cX4); int64_t res = ::__double2ll_rd((-b + sign * sr)/2); // have to cast double to int64_t, otherwise it would only compare up to the // precision of a double variable, ignoring the precision loss if (bXb_cX4 != (int64_t) (sr * sr)) { // handle precision loss by using binary search int64_t llsr = ::__double2ll_rd(sr); // Use the following math to reduce search space. // Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss // let d = abs(bXb_cX4 - llsr * llsr), then we have: // z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d) // z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d) // Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)). 
// And the true value of row would also be with in range, // [res - sqrt(d), res + sqrt(d) + 1) // as the denominator would only reduce the precision penalty. int64_t diff = ::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr)))); // l never exceeds (could equal to) the target row index auto l = res > diff ? res - diff : 0; // r is always larger than the target row index auto r = res + diff + 1; // binary search for the correct answer x <<= 1; // the loop always compares with 2x, so do it once here while (l + 1 < r) { auto m = (l + r) >> 1; // for tril: // b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2 // for triu: // b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2 if (sign * (b + m) * m > x) { r = m; } else { l = m; } } res = l; } return res; } // f: the number of elements in the first row of the trapezoid. // x: the index of the target coordinates ordered by row and then column. // // View the tril as a top trapezoid stacked on a bottom rectangle. Assume x // corresponds to the coordinate (row, col) in the trapezoid, where the row and // the col both start from 0, then we have: // // (f + f + row - 1) * row / 2 <= x [1] // (f + f + row) * (row + 1) / 2 > x [2] // // Therefore, row is the maximum integer satisfying the following inequality: // // (row + 2f - 1)row <= 2x // row^2 + (2f-1)row - 2x <= 0. [3] // // Based on ineuqality [3], we have the following coefficients for formula of // root: // a = 1 // b = 2f - 1 // c = -2x // There are two roots, and we should use the largest integer that does not // exceed the root on the right. Intuitively, it is because: // i) the valid solution range of row is between two roots, as it is <= 0; // ii) as we count in more rows, the total # of elements should always // increase, hence so does the left-hand side row^2 + (2f-1)row - 2x. // Therefore, the valid range of row lies in between the nadir point and // the larger root on the right. // Full proof can be derived from inequality [2]. So, we calculate the result // coordinate as: // // row = floor((-b + sqrt(b^2 - 4c)) / 2) // col = x - (f + f + row - 1) * row / 2 __device__ inline void get_coordinate_in_tril_trapezoid( int64_t f, int64_t x, int64_t & row, int64_t & col) { f <<= 1; // all statements use 2f, so only calculate it once here. auto b = f - 1; auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x; row = resolve_root_int(b, cX4, x, 1); col = x - ((f + row - 1) * row >> 1); } // f: the number of elements in the first row of the bottom trapezoid. // x: the index of the target coordinates ordered by row and then column. // // View the triu as a top rectangle stacked on a bottom trapezoid, where the // trapezoid is upside down. Assume x corresponds to the coordinate (row, col) // in the bottom trapezoid, where the row and the col start from 0, then we // have: // // (f + f - row + 1) * row / 2 <= x [1] // (f + f - row) * (row + 1) / 2 > x [2] // // Therefore, row is the maximum integer satisfying the following inequality: // // (-row + 2f + 1)row <= 2x // row^2 - (2f+1)row + 2x >= 0. [3] // // Based on ineuqality [3], we have the following coefficients for formula of // root: // a = 1 // b = -1 - 2f // c = 2x // There are two roots, and we should use the largest integer that does not // exceed the root on the left. Intuitively, it is because: // i) the valid solution range of row is outside of the two roots, as it is < // > 0; // ii) as we count in more rows, the total # of elements should always // increase, hence so does the left-hand side row^2 - (2f+1)row + 2x. 
// Therefore, the valid range of row lies to the left of the smaller root // on the left. // Full proof can be derived from inequality [2]. So, we calculate the result // coordinate as: // // row = floor((-b - sqrt(b^2 - 4c)) / 2) // col = x - (f + f - row + 1) * row / 2 __device__ inline void get_coordinate_in_triu_trapezoid( int64_t f, int64_t x, int64_t & row, int64_t & col) { f <<= 1; // all statements use 2f, so only calculate it once here. auto b = -1 - f; auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x; row = resolve_root_int(b, cX4, x, -1); col = x - ((f - row + 1) * row >> 1) + row; } } // namespace template <typename scalar_t> __global__ #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(512) #endif void tril_indices_kernel(scalar_t * tensor, int64_t row_offset, int64_t m_first_row, int64_t col, int64_t trapezoid_size, int64_t tril_size) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index < tril_size) { int64_t r, c; if (linear_index < trapezoid_size) { // the coordinate is within the top trapezoid get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c); } else { // the coordinate falls in the bottom rectangle auto surplus = linear_index - trapezoid_size; // add the height of trapezoid: m_last_row (col) - m_first_row + 1 r = surplus / col + col - m_first_row + 1; c = surplus % col; } r += row_offset; tensor[linear_index] = r; tensor[linear_index + tril_size] = c; } } // Some Large test cases for the fallback binary search path is disabled by // default to speed up CI tests and to avoid OOM error. When modifying the // implementation, please enable them in test/test_cuda.py and make sure they // pass on your local server. Tensor tril_indices_cuda( int64_t row, int64_t col, int64_t offset, const TensorOptions& options) { check_args(row, col, options); auto tril_size = get_tril_size(row, col, offset); auto tensor = empty_cuda({2, tril_size}, options); if (tril_size > 0) { auto m_first_row = offset > 0 ? std::min<int64_t>(col, 1 + offset) : // upper bounded by col row + offset > 0; // either 0 or 1 auto trapezoid_row_offset = std::max<int64_t>(0, -offset); auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1; int64_t rectangle_size = 0; if (rectangle_row_offset < row) { rectangle_size = (row - rectangle_row_offset) * col; } dim3 dim_block = cuda::getApplyBlock(); dim3 dim_grid; // using tril_size instead of tensor.numel(), as each thread takes care of // two elements in the tensor. 
TORCH_CHECK( cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()), "unable to get dim grid"); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] { hipLaunchKernelGGL(( tril_indices_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), tensor.data<scalar_t>(), trapezoid_row_offset, m_first_row, col, tril_size - rectangle_size, tril_size); }); } return tensor; } template <typename scalar_t> __global__ void triu_indices_kernel(scalar_t * tensor, int64_t col_offset, int64_t m_first_row, int64_t col, int64_t rectangle_size, int64_t triu_size) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index < triu_size) { int64_t r, c; if (linear_index < rectangle_size) { // the coordinate is within the top rectangle r = linear_index / col; c = linear_index % col; } else { // the coordinate falls in the bottom trapezoid get_coordinate_in_triu_trapezoid( m_first_row, linear_index - rectangle_size, r, c); r += rectangle_size / col; } c += col_offset; tensor[linear_index] = r; tensor[linear_index + triu_size] = c; } } // Some Large test cases for the fallback binary search path is disabled by // default to speed up CI tests and to avoid OOM error. When modifying the // implementation, please enable them in test/test_cuda.py and make sure they // pass on your local server. Tensor triu_indices_cuda( int64_t row, int64_t col, int64_t offset, const TensorOptions& options) { check_args(row, col, options); auto triu_size = row * col - get_tril_size(row, col, offset - 1); auto tensor = empty_cuda({2, triu_size}, options); if (triu_size > 0) { // # of triu elements in the first row auto m_first_row = offset > 0 ? std::max<int64_t>(col - offset, 0) : // upper bounded by col col; // size of the top rectangle int64_t rectangle_size = 0; if (offset < 0) { rectangle_size = std::min<int64_t>(row, -offset) * col; } dim3 dim_block = cuda::getApplyBlock(); dim3 dim_grid; // using triu_size instead of tensor.numel(), as each thread takes care of // two elements in the tensor. TORCH_CHECK( cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()), "unable to get dim grid"); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] { hipLaunchKernelGGL(( triu_indices_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), tensor.data<scalar_t>(), std::max<int64_t>(0, offset), m_first_row, col, rectangle_size, triu_size); }); } return tensor; } }} // namespace at::native
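The derivation comments in the tril/triu helpers above reduce the index-to-coordinate mapping to the right root of a quadratic. A minimal host-side sketch of that arithmetic, ignoring the precision-loss fallback that resolve_root_int adds, might look as follows; tril_coordinate and the f = 1 check are illustrative and not part of the original file.

#include <cmath>
#include <cstdint>
#include <cstdio>

// Host restatement of the comment block above: for a tril trapezoid whose first
// row holds f elements, the row of linear index x is the largest integer with
// row^2 + (2f-1)row - 2x <= 0, i.e. floor of the quadratic's right root.
static void tril_coordinate(int64_t f, int64_t x, int64_t& row, int64_t& col) {
  double b = 2.0 * static_cast<double>(f) - 1.0;
  row = static_cast<int64_t>(
      std::floor((-b + std::sqrt(b * b + 8.0 * static_cast<double>(x))) / 2.0));
  col = x - (2 * f + row - 1) * row / 2;  // elements contained in rows 0..row-1
}

int main() {
  int64_t row, col;
  for (int64_t x = 0; x < 6; ++x) {  // f = 1 corresponds to offset == 0
    tril_coordinate(1, x, row, col);
    std::printf("x=%lld -> (%lld, %lld)\n",
                (long long)x, (long long)row, (long long)col);
  }
  return 0;  // prints (0,0) (1,0) (1,1) (2,0) (2,1) (2,2)
}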
00e53efc93c6b3ed60ceccd59858790809dd852a.cu
#include <ATen/ATen.h> #include <ATen/InitialTensorOptions.h> #include <ATen/NativeFunctions.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/native/TensorFactories.h> #include <ATen/native/cuda/Resize.cuh> #include <c10/util/Exception.h> #include <THC/THCGeneral.h> #include <THC/THCThrustAllocator.cuh> #include <thrust/device_ptr.h> #include <thrust/sort.h> #include <thrust/execution_policy.h> #include <thrust/sequence.h> #include <algorithm> #include <cstddef> #include <cmath> #include <ATen/native/cuda/arc_flag.h> namespace at { namespace native { Tensor& eye_out_cuda(Tensor& result, int64_t n) { return at::native::eye_out_cuda(result, n, /*m=*/-1); } Tensor& eye_out_cuda(Tensor& result, int64_t n, int64_t m) { TORCH_CHECK(n >= 0, "n must be greater or equal to 0, got ", n); if(m < 0) { m = n; } result.resize_({n, m}); result.zero_(); int64_t sz = std::min<int64_t>(n, m); int64_t stride = result.stride(0) + result.stride(1); Tensor diag = result.as_strided({sz}, {stride}); diag.fill_(1); return result; } Tensor empty_cuda(IntArrayRef size, const TensorOptions& options, c10::optional<MemoryFormat> optional_memory_format) { AT_ASSERT(options.backend() == at::Backend::CUDA); AT_ASSERT(!options.is_variable()); // is_variable should have been 'unpacked' // TODO: remove this when Variable and Tensor are merged TORCH_CHECK(!options.pinned_memory(), "Only dense CPU tensors can be pinned"); check_size_nonnegative(size); auto* allocator = at::cuda::getCUDADeviceAllocator(); int64_t nelements = prod_intlist(size); auto dtype = options.dtype(); auto storage_impl = c10::make_intrusive<StorageImpl>( dtype, nelements, allocator->allocate(nelements * dtype.itemsize()), allocator, /*resizeable=*/true); auto tensor = at::detail::make_tensor<TensorImpl>(storage_impl, CUDATensorId()); // Default TensorImpl has size [0] if (size.size() != 1 || size[0] != 0) { tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size); } auto memory_format = optional_memory_format.value_or(MemoryFormat::Contiguous); tensor.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); return tensor; } Tensor ARCempty_cuda(IntArrayRef size, const TensorOptions& options, c10::optional<MemoryFormat> optional_memory_format) { AT_ASSERT(options.backend() == at::Backend::CUDA); AT_ASSERT(!options.is_variable()); // is_variable should have been 'unpacked' // TODO: remove this when Variable and Tensor are merged TORCH_CHECK(!options.pinned_memory(), "Only dense CPU tensors can be pinned"); check_size_nonnegative(size); auto* allocator = at::cuda::getCUDADeviceAllocator(); int64_t nelements = prod_intlist(size); auto dtype = options.dtype(); auto storage_impl = c10::make_intrusive<StorageImpl>( dtype, nelements, // allocator->allocate(nelements * dtype.itemsize()), allocator->ARCallocate(nelements * dtype.itemsize()), allocator, /*resizeable=*/true); auto tensor = at::detail::make_tensor<TensorImpl>(storage_impl, CUDATensorId()); // Default TensorImpl has size [0] if (size.size() != 1 || size[0] != 0) { tensor.unsafeGetTensorImpl()->set_sizes_contiguous(size); } auto memory_format = optional_memory_format.value_or(MemoryFormat::Contiguous); tensor.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); return tensor; } Tensor empty_strided_cuda(IntArrayRef size, IntArrayRef stride, const TensorOptions& options) { int newTid = ++arc_vm.global_tensor_id_; auto t = at::native::empty_cuda({0}, options); // std::cout << "empty_strided t newTid: " << newTid << std::endl; 
t.unsafeGetTensorImpl()->tensor_id = newTid; at::native::resize_impl_cuda_(t.unsafeGetTensorImpl(), size, stride); return t; } Tensor& randperm_out_cuda(Tensor& result, int64_t n, Generator* generator) { TORCH_CHECK(n >= 0, "n must be non-negative, got", n); check_supported_max_int_with_precision(n, result); result.resize_({n}); if (n < 30000) { // For small inputs, we offload it to CPU instead. auto result_cpu = at::empty({n}, result.options().device(kCPU)); randperm_out(result_cpu, n, generator); return result.copy_(result_cpu); } #if 0 // This if condition should never be true because if n >= 30000 and the tensor has a Half type, // check_supported_max_int_with_precision should have reported an error. This snippet is commented out but left here // for the sake of clarity, because Half in thrust is spotty, and we do not want future change unaware of this. if (result.scalar_type() == at::ScalarType::Half) { // Half in thrust is spotty. Avoid. auto result_float = at::empty({n}, initialTensorOptions().device(Device(DeviceType::CUDA))); return result.copy_(randperm_out_cuda(result_float, n, generator)); } #endif // Generate random values for the keys array AT_DISPATCH_ALL_TYPES( result.scalar_type(), "randperm_out_cuda", [&] { auto keys = at::empty(result.sizes(), result.options()).random_(generator); auto result_data = thrust::device_ptr<scalar_t>(result.data<scalar_t>()); auto keys_data = thrust::device_ptr<scalar_t>(keys.data<scalar_t>()); auto state = globalContext().getTHCState(); THCThrustAllocator thrustAlloc(state); auto policy = thrust::cuda::par(thrustAlloc).on(at::cuda::getCurrentCUDAStream()); thrust::sequence(policy, result_data, result_data + n); // Use the sorted order of keys to rearrange the result array thrust::sort_by_key(policy, keys_data, keys_data + n, result_data); } ); return result; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangle ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ namespace { // To find the max integer that does not exceed the root of an int64_t variable, // we could use a loop to test one bit at a time, which takes up to 31 // iterations. This would give the accurate result, but is relatively slow and // is an overkill for most cases where double's precision suffice. // // If we directly use sqrt to calculate the root, the convertion from int64_t // to double would lose 11 bits precision. // // The following solution uses sqrt directly for most cases, and would only // special handle it if there is indeed precision loss. __device__ inline int64_t resolve_root_int( int64_t b, int64_t cX4, int64_t x, int32_t sign) { int64_t bXb_cX4 = b*b - cX4; // potential precision loss could occur here when casting int64_t (63 bits // precision) to double (52 bits precision) double sr = ::sqrt((double)bXb_cX4); int64_t res = ::__double2ll_rd((-b + sign * sr)/2); // have to cast double to int64_t, otherwise it would only compare up to the // precision of a double variable, ignoring the precision loss if (bXb_cX4 != (int64_t) (sr * sr)) { // handle precision loss by using binary search int64_t llsr = ::__double2ll_rd(sr); // Use the following math to reduce search space. // Suppose z is the accurate result of sqrt(bXb_cX4) without precision loss // let d = abs(bXb_cX4 - llsr * llsr), then we have: // z = sqrt(bXb_cX4) <= sqrt(llsr * llsr + d) <= llsr + sqrt(d) // z = sqrt(bXb_cX4) >= sqrt(llsr * llsr - d) >= llsr - sqrt(d) // Hence, it is sufficient to search range [llsr - sqrt(d), llsr + sqrt(d)). 
// And the true value of row would also be with in range, // [res - sqrt(d), res + sqrt(d) + 1) // as the denominator would only reduce the precision penalty. int64_t diff = ::__double2ll_ru(::sqrt(::fabs((double)(bXb_cX4 - llsr * llsr)))); // l never exceeds (could equal to) the target row index auto l = res > diff ? res - diff : 0; // r is always larger than the target row index auto r = res + diff + 1; // binary search for the correct answer x <<= 1; // the loop always compares with 2x, so do it once here while (l + 1 < r) { auto m = (l + r) >> 1; // for tril: // b = 2f - 1, sign = 1, hence (2f + m - 1) * m / 2 // for triu: // b = -2f - 1, sign = -1, hence (2f - m + 1) * m / 2 if (sign * (b + m) * m > x) { r = m; } else { l = m; } } res = l; } return res; } // f: the number of elements in the first row of the trapezoid. // x: the index of the target coordinates ordered by row and then column. // // View the tril as a top trapezoid stacked on a bottom rectangle. Assume x // corresponds to the coordinate (row, col) in the trapezoid, where the row and // the col both start from 0, then we have: // // (f + f + row - 1) * row / 2 <= x [1] // (f + f + row) * (row + 1) / 2 > x [2] // // Therefore, row is the maximum integer satisfying the following inequality: // // (row + 2f - 1)row <= 2x // row^2 + (2f-1)row - 2x <= 0. [3] // // Based on ineuqality [3], we have the following coefficients for formula of // root: // a = 1 // b = 2f - 1 // c = -2x // There are two roots, and we should use the largest integer that does not // exceed the root on the right. Intuitively, it is because: // i) the valid solution range of row is between two roots, as it is <= 0; // ii) as we count in more rows, the total # of elements should always // increase, hence so does the left-hand side row^2 + (2f-1)row - 2x. // Therefore, the valid range of row lies in between the nadir point and // the larger root on the right. // Full proof can be derived from inequality [2]. So, we calculate the result // coordinate as: // // row = floor((-b + sqrt(b^2 - 4c)) / 2) // col = x - (f + f + row - 1) * row / 2 __device__ inline void get_coordinate_in_tril_trapezoid( int64_t f, int64_t x, int64_t & row, int64_t & col) { f <<= 1; // all statements use 2f, so only calculate it once here. auto b = f - 1; auto cX4 = - (x << 3); // 4 * c = 4 * (-2x) = -8x; row = resolve_root_int(b, cX4, x, 1); col = x - ((f + row - 1) * row >> 1); } // f: the number of elements in the first row of the bottom trapezoid. // x: the index of the target coordinates ordered by row and then column. // // View the triu as a top rectangle stacked on a bottom trapezoid, where the // trapezoid is upside down. Assume x corresponds to the coordinate (row, col) // in the bottom trapezoid, where the row and the col start from 0, then we // have: // // (f + f - row + 1) * row / 2 <= x [1] // (f + f - row) * (row + 1) / 2 > x [2] // // Therefore, row is the maximum integer satisfying the following inequality: // // (-row + 2f + 1)row <= 2x // row^2 - (2f+1)row + 2x >= 0. [3] // // Based on ineuqality [3], we have the following coefficients for formula of // root: // a = 1 // b = -1 - 2f // c = 2x // There are two roots, and we should use the largest integer that does not // exceed the root on the left. Intuitively, it is because: // i) the valid solution range of row is outside of the two roots, as it is < // > 0; // ii) as we count in more rows, the total # of elements should always // increase, hence so does the left-hand side row^2 - (2f+1)row + 2x. 
// Therefore, the valid range of row lies to the left of the smaller root // on the left. // Full proof can be derived from inequality [2]. So, we calculate the result // coordinate as: // // row = floor((-b - sqrt(b^2 - 4c)) / 2) // col = x - (f + f - row + 1) * row / 2 __device__ inline void get_coordinate_in_triu_trapezoid( int64_t f, int64_t x, int64_t & row, int64_t & col) { f <<= 1; // all statements use 2f, so only calculate it once here. auto b = -1 - f; auto cX4 = x << 3; // 4 * c = 4 * (2x) = 8x; row = resolve_root_int(b, cX4, x, -1); col = x - ((f - row + 1) * row >> 1) + row; } } // namespace template <typename scalar_t> __global__ #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(512) #endif void tril_indices_kernel(scalar_t * tensor, int64_t row_offset, int64_t m_first_row, int64_t col, int64_t trapezoid_size, int64_t tril_size) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index < tril_size) { int64_t r, c; if (linear_index < trapezoid_size) { // the coordinate is within the top trapezoid get_coordinate_in_tril_trapezoid(m_first_row, linear_index, r, c); } else { // the coordinate falls in the bottom rectangle auto surplus = linear_index - trapezoid_size; // add the height of trapezoid: m_last_row (col) - m_first_row + 1 r = surplus / col + col - m_first_row + 1; c = surplus % col; } r += row_offset; tensor[linear_index] = r; tensor[linear_index + tril_size] = c; } } // Some Large test cases for the fallback binary search path is disabled by // default to speed up CI tests and to avoid OOM error. When modifying the // implementation, please enable them in test/test_cuda.py and make sure they // pass on your local server. Tensor tril_indices_cuda( int64_t row, int64_t col, int64_t offset, const TensorOptions& options) { check_args(row, col, options); auto tril_size = get_tril_size(row, col, offset); auto tensor = empty_cuda({2, tril_size}, options); if (tril_size > 0) { auto m_first_row = offset > 0 ? std::min<int64_t>(col, 1 + offset) : // upper bounded by col row + offset > 0; // either 0 or 1 auto trapezoid_row_offset = std::max<int64_t>(0, -offset); auto rectangle_row_offset = trapezoid_row_offset + col - m_first_row + 1; int64_t rectangle_size = 0; if (rectangle_row_offset < row) { rectangle_size = (row - rectangle_row_offset) * col; } dim3 dim_block = cuda::getApplyBlock(); dim3 dim_grid; // using tril_size instead of tensor.numel(), as each thread takes care of // two elements in the tensor. 
TORCH_CHECK( cuda::getApplyGrid(tril_size, dim_grid, tensor.get_device()), "unable to get dim grid"); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "tril_indices_cuda", [&] { tril_indices_kernel<<< dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( tensor.data<scalar_t>(), trapezoid_row_offset, m_first_row, col, tril_size - rectangle_size, tril_size); }); } return tensor; } template <typename scalar_t> __global__ void triu_indices_kernel(scalar_t * tensor, int64_t col_offset, int64_t m_first_row, int64_t col, int64_t rectangle_size, int64_t triu_size) { int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x; if (linear_index < triu_size) { int64_t r, c; if (linear_index < rectangle_size) { // the coordinate is within the top rectangle r = linear_index / col; c = linear_index % col; } else { // the coordinate falls in the bottom trapezoid get_coordinate_in_triu_trapezoid( m_first_row, linear_index - rectangle_size, r, c); r += rectangle_size / col; } c += col_offset; tensor[linear_index] = r; tensor[linear_index + triu_size] = c; } } // Some Large test cases for the fallback binary search path is disabled by // default to speed up CI tests and to avoid OOM error. When modifying the // implementation, please enable them in test/test_cuda.py and make sure they // pass on your local server. Tensor triu_indices_cuda( int64_t row, int64_t col, int64_t offset, const TensorOptions& options) { check_args(row, col, options); auto triu_size = row * col - get_tril_size(row, col, offset - 1); auto tensor = empty_cuda({2, triu_size}, options); if (triu_size > 0) { // # of triu elements in the first row auto m_first_row = offset > 0 ? std::max<int64_t>(col - offset, 0) : // upper bounded by col col; // size of the top rectangle int64_t rectangle_size = 0; if (offset < 0) { rectangle_size = std::min<int64_t>(row, -offset) * col; } dim3 dim_block = cuda::getApplyBlock(); dim3 dim_grid; // using triu_size instead of tensor.numel(), as each thread takes care of // two elements in the tensor. TORCH_CHECK( cuda::getApplyGrid(triu_size, dim_grid, tensor.get_device()), "unable to get dim grid"); AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, tensor.scalar_type(), "triu_indices_cuda", [&] { triu_indices_kernel<<< dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>( tensor.data<scalar_t>(), std::max<int64_t>(0, offset), m_first_row, col, rectangle_size, triu_size); }); } return tensor; } }} // namespace at::native
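Functionally this .cu file and its .hip counterpart above differ only in the mechanical rewrites hipify applies: headers, stream getters, and the triple-chevron kernel launch. A small standalone CUDA sketch of the launch pattern being translated is below; dummy_kernel and launch are illustrative names, not from the file, and the hip form in the comment is only an approximation of hipify's output.

#include <cuda_runtime.h>

__global__ void dummy_kernel(const int* in, int* out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = in[i] + 1;
}

void launch(const int* d_in, int* d_out, int n, cudaStream_t stream) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  // CUDA form, as written in this file:
  dummy_kernel<<<grid, block, 0, stream>>>(d_in, d_out, n);
  // hipify rewrites the launch in the .hip twin roughly as:
  //   hipLaunchKernelGGL(dummy_kernel, grid, block, 0, stream, d_in, d_out, n);
  // i.e. the kernel name, launch configuration, dynamic shared-memory size and
  // stream become ordinary arguments, followed by the kernel's own arguments.
}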
a2fb3b38431f052f57dc8c404691a6a5842dc0e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layer.hpp" #include "caffe/layers/operator/multi_conv_layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { static __global__ void rearange_multi_conv_forwardkernel(int count,int multi,int channels,int height,int width,const float * in, float *out) { CUDA_KERNEL_LOOP(index, count) { int n = index / width / height / channels; int c = index / width / height % channels; int h = index / width % height; int w = index % width; int m = h%multi*multi+w%multi; int in_index = (((n*multi*multi+m)*channels+c)*height/multi+h/multi)*width/multi+w/multi; out[index] = in[in_index]; } } static __global__ void rearange_multi_conv_backwardkernel(int count,int multi,int channels,int height,int width,const float * out_diff, float *in_diff) { CUDA_KERNEL_LOOP(index, count) { int n = index / width / height / channels; int c = index / width / height % channels; int h = index / width % height; int w = index % width; int m = h%multi*multi+w%multi; int in_index = (((n*multi*multi+m)*channels+c)*height/multi+h/multi)*width/multi+w/multi; in_diff[in_index] = out_diff[index]; } } void MultiConvolutionLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*> &top) { const float* bottom_data = bottom[0]->gpu_data(); float* top_data = buffer_top_->mutable_gpu_data(); float* col_data = col_buffer_->mutable_gpu_data(); const float* weight = this->blobs_[0]->gpu_data(); int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); int top_offset_ = height_out_ * width_out_ * multi_num_output_ / group_; int col_offset_ = height_out_ * width_out_ * kernel_size_ * kernel_size_ * channels / group_; int weight_offset_ = kernel_size_ * kernel_size_ * channels * multi_num_output_ / group_ / group_; for (int n = 0; n < num; ++n) { im2col_gpu(bottom_data + bottom[0]->offset(n), channels, height,width, kernel_size_, kernel_size_, pad_, pad_, stride_, stride_, filter_stride_, filter_stride_, col_data); for (int g = 0; g < group_; g++) { caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, multi_num_output_/ group_, height_out_*width_out_, kernel_size_*kernel_size_*channels/ group_, (float)1., weight+ weight_offset_ * g , col_data + col_offset_ * g, (float)0., top_data + top[0]->offset(n) + top_offset_ * g ); } if (this->layer_param_.convolution_param().bias_term()) { caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, multi_num_output_,height_out_*width_out_, 1, (float)1., this->blobs_[1]->gpu_data(), bias_multiplier_->gpu_data(), (float)1., top_data + top[0]->offset(n)); } } hipLaunchKernelGGL(( rearange_multi_conv_forwardkernel), dim3(CAFFE_GET_BLOCKS(buffer_top_->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, buffer_top_->count(),multi_,num_output_,height_out_* multi_,width_out_* multi_,buffer_top_->gpu_data(),top[0]->mutable_gpu_data()); } void MultiConvolutionLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); //--------------------------------------------------------------------------------------------------- buffer_top_->Reshape(num,multi_num_output_,height_out_,width_out_); col_buffer_->Reshape(kernel_size_*kernel_size_*channels,height_out_*width_out_,1,1); if (this->layer_param_.convolution_param().bias_term()) { 
bias_multiplier_->Reshape(1,1,height_out_,width_out_); caffe_gpu_set(bias_multiplier_->count(),float(1),bias_multiplier_->mutable_gpu_data()); } //--------------------------------------------------------------------------------------------------- hipLaunchKernelGGL(( rearange_multi_conv_backwardkernel), dim3(CAFFE_GET_BLOCKS(buffer_top_->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, buffer_top_->count(),multi_,num_output_,height_out_* multi_,width_out_* multi_,top[0]->gpu_diff(),buffer_top_->mutable_gpu_diff()); const float* top_diff = buffer_top_->gpu_diff(); const float* weight = this->blobs_[0]->gpu_data(); const float* bottom_data = bottom[0]->gpu_data(); float* bottom_diff = bottom[0]->mutable_gpu_diff(); float* weight_diff = this->blobs_[0]->mutable_gpu_diff(); float* col_data = col_buffer_->mutable_gpu_data(); float* col_diff = col_buffer_->mutable_gpu_diff(); if (this->layer_param_.convolution_param().bias_term()) { float* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < num; ++n) { caffe_gpu_gemv(CblasNoTrans, multi_num_output_, height_out_ * width_out_, (float)1., top_diff + top[0]->offset(n), bias_multiplier_->gpu_data(), (float)1., bias_diff); } } int top_offset_ = height_out_ * width_out_ * multi_num_output_ / group_; int col_offset_ = height_out_ * width_out_ * kernel_size_ * kernel_size_ * channels / group_; int weight_offset_ = kernel_size_ * kernel_size_ * channels * multi_num_output_ / group_ / group_; for (int n = 0; n < num; ++n) { im2col_gpu(bottom_data + bottom[0]->offset(n), channels, height,width, kernel_size_, kernel_size_, pad_, pad_, stride_, stride_, filter_stride_, filter_stride_, col_data); for (int g = 0; g < group_; g++) { caffe_gpu_gemm(CblasNoTrans, CblasTrans, multi_num_output_ / group_, kernel_size_*kernel_size_*channels / group_, height_out_*width_out_, (float)1., top_diff + top[0]->offset(n) + top_offset_ * g, col_data + col_offset_ * g, (float)1., weight_diff + weight_offset_ * g); } } for (int n = 0; n < num; ++n) { for (int g = 0; g < group_; g++) { caffe_gpu_gemm(CblasTrans, CblasNoTrans, kernel_size_*kernel_size_*channels/ group_, height_out_*width_out_, multi_num_output_/ group_, (float)1., weight + weight_offset_ * g, top_diff + top[0]->offset(n) + top_offset_ * g, (float)0., col_diff + col_offset_ * g); } col2im_gpu(col_diff, channels, height, width, kernel_size_, kernel_size_, pad_, pad_, stride_, stride_, filter_stride_, filter_stride_, bottom_diff + bottom[0]->offset(n)); } } void MultiConvolutionLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*> &top) { } } // namespace caffe
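A minimal CPU restatement of the gather implemented by rearange_multi_conv_forwardkernel above, assuming height and width are exact multiples of multi as the kernel itself assumes; rearange_forward_cpu is an illustrative name and not part of the layer.

#include <vector>

// Gathers out[n][c][h][w] from in[(n*multi*multi + m)][c][h/multi][w/multi],
// where m = (h % multi) * multi + (w % multi), mirroring the device kernel.
// Both buffers hold num*channels*height*width floats.
static void rearange_forward_cpu(int num, int multi, int channels,
                                 int height, int width,
                                 const std::vector<float>& in,
                                 std::vector<float>& out) {
  for (int n = 0; n < num; ++n)
    for (int c = 0; c < channels; ++c)
      for (int h = 0; h < height; ++h)
        for (int w = 0; w < width; ++w) {
          int m = h % multi * multi + w % multi;
          int in_index = (((n * multi * multi + m) * channels + c)
                              * (height / multi) + h / multi)
                             * (width / multi) + w / multi;
          int out_index = ((n * channels + c) * height + h) * width + w;
          out[out_index] = in[in_index];
        }
}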
a2fb3b38431f052f57dc8c404691a6a5842dc0e4.cu
#include <vector> #include "caffe/layer.hpp" #include "caffe/layers/operator/multi_conv_layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { static __global__ void rearange_multi_conv_forwardkernel(int count,int multi,int channels,int height,int width,const float * in, float *out) { CUDA_KERNEL_LOOP(index, count) { int n = index / width / height / channels; int c = index / width / height % channels; int h = index / width % height; int w = index % width; int m = h%multi*multi+w%multi; int in_index = (((n*multi*multi+m)*channels+c)*height/multi+h/multi)*width/multi+w/multi; out[index] = in[in_index]; } } static __global__ void rearange_multi_conv_backwardkernel(int count,int multi,int channels,int height,int width,const float * out_diff, float *in_diff) { CUDA_KERNEL_LOOP(index, count) { int n = index / width / height / channels; int c = index / width / height % channels; int h = index / width % height; int w = index % width; int m = h%multi*multi+w%multi; int in_index = (((n*multi*multi+m)*channels+c)*height/multi+h/multi)*width/multi+w/multi; in_diff[in_index] = out_diff[index]; } } void MultiConvolutionLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*> &top) { const float* bottom_data = bottom[0]->gpu_data(); float* top_data = buffer_top_->mutable_gpu_data(); float* col_data = col_buffer_->mutable_gpu_data(); const float* weight = this->blobs_[0]->gpu_data(); int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); int top_offset_ = height_out_ * width_out_ * multi_num_output_ / group_; int col_offset_ = height_out_ * width_out_ * kernel_size_ * kernel_size_ * channels / group_; int weight_offset_ = kernel_size_ * kernel_size_ * channels * multi_num_output_ / group_ / group_; for (int n = 0; n < num; ++n) { im2col_gpu(bottom_data + bottom[0]->offset(n), channels, height,width, kernel_size_, kernel_size_, pad_, pad_, stride_, stride_, filter_stride_, filter_stride_, col_data); for (int g = 0; g < group_; g++) { caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, multi_num_output_/ group_, height_out_*width_out_, kernel_size_*kernel_size_*channels/ group_, (float)1., weight+ weight_offset_ * g , col_data + col_offset_ * g, (float)0., top_data + top[0]->offset(n) + top_offset_ * g ); } if (this->layer_param_.convolution_param().bias_term()) { caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, multi_num_output_,height_out_*width_out_, 1, (float)1., this->blobs_[1]->gpu_data(), bias_multiplier_->gpu_data(), (float)1., top_data + top[0]->offset(n)); } } rearange_multi_conv_forwardkernel<<<CAFFE_GET_BLOCKS(buffer_top_->count()), CAFFE_CUDA_NUM_THREADS>>> (buffer_top_->count(),multi_,num_output_,height_out_* multi_,width_out_* multi_,buffer_top_->gpu_data(),top[0]->mutable_gpu_data()); } void MultiConvolutionLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom) { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); //--------------------------------------------------------------------------------------------------- buffer_top_->Reshape(num,multi_num_output_,height_out_,width_out_); col_buffer_->Reshape(kernel_size_*kernel_size_*channels,height_out_*width_out_,1,1); if (this->layer_param_.convolution_param().bias_term()) { bias_multiplier_->Reshape(1,1,height_out_,width_out_); caffe_gpu_set(bias_multiplier_->count(),float(1),bias_multiplier_->mutable_gpu_data()); } 
//--------------------------------------------------------------------------------------------------- rearange_multi_conv_backwardkernel<<<CAFFE_GET_BLOCKS(buffer_top_->count()), CAFFE_CUDA_NUM_THREADS>>> (buffer_top_->count(),multi_,num_output_,height_out_* multi_,width_out_* multi_,top[0]->gpu_diff(),buffer_top_->mutable_gpu_diff()); const float* top_diff = buffer_top_->gpu_diff(); const float* weight = this->blobs_[0]->gpu_data(); const float* bottom_data = bottom[0]->gpu_data(); float* bottom_diff = bottom[0]->mutable_gpu_diff(); float* weight_diff = this->blobs_[0]->mutable_gpu_diff(); float* col_data = col_buffer_->mutable_gpu_data(); float* col_diff = col_buffer_->mutable_gpu_diff(); if (this->layer_param_.convolution_param().bias_term()) { float* bias_diff = this->blobs_[1]->mutable_gpu_diff(); for (int n = 0; n < num; ++n) { caffe_gpu_gemv(CblasNoTrans, multi_num_output_, height_out_ * width_out_, (float)1., top_diff + top[0]->offset(n), bias_multiplier_->gpu_data(), (float)1., bias_diff); } } int top_offset_ = height_out_ * width_out_ * multi_num_output_ / group_; int col_offset_ = height_out_ * width_out_ * kernel_size_ * kernel_size_ * channels / group_; int weight_offset_ = kernel_size_ * kernel_size_ * channels * multi_num_output_ / group_ / group_; for (int n = 0; n < num; ++n) { im2col_gpu(bottom_data + bottom[0]->offset(n), channels, height,width, kernel_size_, kernel_size_, pad_, pad_, stride_, stride_, filter_stride_, filter_stride_, col_data); for (int g = 0; g < group_; g++) { caffe_gpu_gemm(CblasNoTrans, CblasTrans, multi_num_output_ / group_, kernel_size_*kernel_size_*channels / group_, height_out_*width_out_, (float)1., top_diff + top[0]->offset(n) + top_offset_ * g, col_data + col_offset_ * g, (float)1., weight_diff + weight_offset_ * g); } } for (int n = 0; n < num; ++n) { for (int g = 0; g < group_; g++) { caffe_gpu_gemm(CblasTrans, CblasNoTrans, kernel_size_*kernel_size_*channels/ group_, height_out_*width_out_, multi_num_output_/ group_, (float)1., weight + weight_offset_ * g, top_diff + top[0]->offset(n) + top_offset_ * g, (float)0., col_diff + col_offset_ * g); } col2im_gpu(col_diff, channels, height, width, kernel_size_, kernel_size_, pad_, pad_, stride_, stride_, filter_stride_, filter_stride_, bottom_diff + bottom[0]->offset(n)); } } void MultiConvolutionLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*> &top) { } } // namespace caffe
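Forward_gpu above lowers the convolution to im2col followed by one GEMM per group. A naive sketch of that single per-group product, with the dimensions it uses spelled out, might be as follows; gemm_nn is illustrative and not Caffe's caffe_gpu_gemm.

// For each image n and group g, Forward_gpu computes
//   W_g   : (multi_num_output_/group) x (kernel*kernel*channels/group)
//   col_g : (kernel*kernel*channels/group) x (height_out*width_out)
//   top_g : (multi_num_output_/group) x (height_out*width_out)
// A naive row-major reference of one such C = A * B product:
static void gemm_nn(int M, int N, int K, const float* A, const float* B, float* C) {
  for (int i = 0; i < M; ++i)
    for (int j = 0; j < N; ++j) {
      float acc = 0.f;
      for (int k = 0; k < K; ++k) acc += A[i * K + k] * B[k * N + j];
      C[i * N + j] = acc;  // beta == 0 in the forward pass
    }
}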
27a5ba478478b3c674c4b7b281ab25d88d89015b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2021 Jisang Yoon // All rights reserved. // // This source code is licensed under the Apache 2.0 license found in the // LICENSE file in the root directory of this source tree. #include "culda/culda.hpp" #include "culda/cuda_lda_kernels.cuh" namespace cusim { CuLDA::CuLDA() { logger_container_.reset(new CuSimLogger("lda")); logger_ = logger_container_->get_logger(); dev_info_ = GetDeviceInfo(); if (dev_info_.unknown) DEBUG0("Unknown device type"); INFO("cuda device info, major: {}, minor: {}, multi processors: {}, cores: {}", dev_info_.major, dev_info_.minor, dev_info_.mp_cnt, dev_info_.cores); } CuLDA::~CuLDA() {} bool CuLDA::Init(std::string opt_path) { std::ifstream in(opt_path.c_str()); if (not in.is_open()) return false; std::string str((std::istreambuf_iterator<char>(in)), std::istreambuf_iterator<char>()); std::string err_cmt; auto _opt = json11::Json::parse(str, err_cmt); if (not err_cmt.empty()) return false; opt_ = _opt; logger_container_->set_log_level(opt_["c_log_level"].int_value()); num_topics_ = opt_["num_topics"].int_value(); block_dim_ = opt_["block_dim"].int_value(); block_cnt_ = opt_["hyper_threads"].number_value() * (dev_info_.cores / block_dim_); INFO("num_topics: {}, block_dim: {}, block_cnt: {}", num_topics_, block_dim_, block_cnt_); return true; } void CuLDA::LoadModel(float* alpha, float* beta, float* grad_alpha, float* new_beta, int num_words) { num_words_ = num_words; DEBUG("copy model({} x {})", num_words_, num_topics_); dev_alpha_.resize(num_topics_); dev_beta_.resize(num_topics_ * num_words_); thrust::copy(alpha, alpha + num_topics_, dev_alpha_.begin()); thrust::copy(beta, beta + num_topics_ * num_words_, dev_beta_.begin()); alpha_ = alpha; beta_ = beta; // resize device vector grad_alpha_ = grad_alpha; new_beta_ = new_beta; dev_grad_alpha_.resize(num_topics_ * block_cnt_); dev_new_beta_.resize(num_topics_ * num_words_); // copy to device thrust::copy(grad_alpha_, grad_alpha_ + block_cnt_ * num_topics_, dev_grad_alpha_.begin()); thrust::copy(new_beta_, new_beta_ + num_words_ * num_topics_, dev_new_beta_.begin()); // set locks dev_locks_.resize(num_words_); std::vector<int> host_locks(num_words_, 0); thrust::copy(host_locks.begin(), host_locks.end(), dev_locks_.begin()); CHECK_CUDA(hipDeviceSynchronize()); } std::pair<float, float> CuLDA::FeedData( const int* cols, const int* indptr, const bool* vali, const float* counts, float* gamma, const bool init_gamma, const int num_cols, const int num_indptr, const int num_iters) { // copy feed data to GPU memory thrust::device_vector<int> dev_cols(num_cols); thrust::device_vector<int> dev_indptr(num_indptr + 1); thrust::device_vector<bool> dev_vali(num_cols); thrust::device_vector<float> dev_counts(num_cols); thrust::device_vector<float> dev_gamma(num_indptr * num_topics_); thrust::device_vector<float> dev_train_losses(block_cnt_, 0.0f); thrust::device_vector<float> dev_vali_losses(block_cnt_, 0.0f); thrust::copy(cols, cols + num_cols, dev_cols.begin()); thrust::copy(indptr, indptr + num_indptr + 1, dev_indptr.begin()); thrust::copy(vali, vali + num_cols, dev_vali.begin()); thrust::copy(counts, counts + num_cols, dev_counts.begin()); thrust::copy(gamma, gamma + num_indptr * num_topics_, dev_gamma.begin()); CHECK_CUDA(hipDeviceSynchronize()); DEBUG0("copy feed data to GPU memory"); // run E step in GPU hipLaunchKernelGGL(( EstepKernel), dim3(block_cnt_), dim3(block_dim_), 4 * num_topics_ * sizeof(float), 0, 
thrust::raw_pointer_cast(dev_cols.data()), thrust::raw_pointer_cast(dev_indptr.data()), thrust::raw_pointer_cast(dev_vali.data()), thrust::raw_pointer_cast(dev_counts.data()), init_gamma, num_cols, num_indptr, num_topics_, num_iters, thrust::raw_pointer_cast(dev_alpha_.data()), thrust::raw_pointer_cast(dev_beta_.data()), thrust::raw_pointer_cast(dev_gamma.data()), thrust::raw_pointer_cast(dev_grad_alpha_.data()), thrust::raw_pointer_cast(dev_new_beta_.data()), thrust::raw_pointer_cast(dev_train_losses.data()), thrust::raw_pointer_cast(dev_vali_losses.data()), thrust::raw_pointer_cast(dev_locks_.data())); CHECK_CUDA(hipDeviceSynchronize()); DEBUG0("run E step in GPU"); // pull loss std::vector<float> train_losses(block_cnt_), vali_losses(block_cnt_); thrust::copy(dev_train_losses.begin(), dev_train_losses.end(), train_losses.begin()); thrust::copy(dev_vali_losses.begin(), dev_vali_losses.end(), vali_losses.begin()); thrust::copy(dev_gamma.begin(), dev_gamma.end(), gamma); CHECK_CUDA(hipDeviceSynchronize()); DEBUG0("pull loss values"); // accumulate float train_loss = std::accumulate(train_losses.begin(), train_losses.end(), 0.0f); float vali_loss = std::accumulate(vali_losses.begin(), vali_losses.end(), 0.0f); return {train_loss, vali_loss}; } void CuLDA::Pull() { thrust::copy(dev_grad_alpha_.begin(), dev_grad_alpha_.end(), grad_alpha_); thrust::copy(dev_new_beta_.begin(), dev_new_beta_.end(), new_beta_); CHECK_CUDA(hipDeviceSynchronize()); } void CuLDA::Push() { thrust::copy(alpha_, alpha_ + num_topics_, dev_alpha_.begin()); thrust::copy(grad_alpha_, grad_alpha_ + block_cnt_ * num_topics_, dev_grad_alpha_.begin()); thrust::copy(beta_, beta_ + num_words_ * num_topics_, dev_beta_.begin()); thrust::copy(new_beta_, new_beta_ + num_words_ * num_topics_, dev_new_beta_.begin()); CHECK_CUDA(hipDeviceSynchronize()); } int CuLDA::GetBlockCnt() { return block_cnt_; } } // namespace cusim
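FeedData above stages host arrays through thrust::device_vector and passes raw pointers plus a dynamic shared-memory size into the E-step launch. A minimal sketch of that staging pattern, written with the CUDA runtime spellings used in the .cu twin below, could look like this; scale_kernel and run are illustrative names, not the real EstepKernel.

#include <cuda_runtime.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <vector>

__global__ void scale_kernel(const float* in, float* out, float s, int n) {
  extern __shared__ float shm[];  // dynamically sized scratch, as EstepKernel requests
  (void)shm;
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = s * in[i];
}

void run(const std::vector<float>& host_in, std::vector<float>& host_out, int num_topics) {
  int n = static_cast<int>(host_in.size());
  thrust::device_vector<float> d_in(n), d_out(n);
  thrust::copy(host_in.begin(), host_in.end(), d_in.begin());
  // kernel<<<blocks, threads, shared_bytes>>>(raw pointers...), mirroring the
  // 4 * num_topics_ * sizeof(float) dynamic shared-memory request in FeedData.
  scale_kernel<<<(n + 255) / 256, 256, 4 * num_topics * sizeof(float)>>>(
      thrust::raw_pointer_cast(d_in.data()),
      thrust::raw_pointer_cast(d_out.data()), 2.0f, n);
  cudaDeviceSynchronize();
  host_out.resize(n);
  thrust::copy(d_out.begin(), d_out.end(), host_out.begin());
}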
27a5ba478478b3c674c4b7b281ab25d88d89015b.cu
// Copyright (c) 2021 Jisang Yoon // All rights reserved. // // This source code is licensed under the Apache 2.0 license found in the // LICENSE file in the root directory of this source tree. #include "culda/culda.hpp" #include "culda/cuda_lda_kernels.cuh" namespace cusim { CuLDA::CuLDA() { logger_container_.reset(new CuSimLogger("lda")); logger_ = logger_container_->get_logger(); dev_info_ = GetDeviceInfo(); if (dev_info_.unknown) DEBUG0("Unknown device type"); INFO("cuda device info, major: {}, minor: {}, multi processors: {}, cores: {}", dev_info_.major, dev_info_.minor, dev_info_.mp_cnt, dev_info_.cores); } CuLDA::~CuLDA() {} bool CuLDA::Init(std::string opt_path) { std::ifstream in(opt_path.c_str()); if (not in.is_open()) return false; std::string str((std::istreambuf_iterator<char>(in)), std::istreambuf_iterator<char>()); std::string err_cmt; auto _opt = json11::Json::parse(str, err_cmt); if (not err_cmt.empty()) return false; opt_ = _opt; logger_container_->set_log_level(opt_["c_log_level"].int_value()); num_topics_ = opt_["num_topics"].int_value(); block_dim_ = opt_["block_dim"].int_value(); block_cnt_ = opt_["hyper_threads"].number_value() * (dev_info_.cores / block_dim_); INFO("num_topics: {}, block_dim: {}, block_cnt: {}", num_topics_, block_dim_, block_cnt_); return true; } void CuLDA::LoadModel(float* alpha, float* beta, float* grad_alpha, float* new_beta, int num_words) { num_words_ = num_words; DEBUG("copy model({} x {})", num_words_, num_topics_); dev_alpha_.resize(num_topics_); dev_beta_.resize(num_topics_ * num_words_); thrust::copy(alpha, alpha + num_topics_, dev_alpha_.begin()); thrust::copy(beta, beta + num_topics_ * num_words_, dev_beta_.begin()); alpha_ = alpha; beta_ = beta; // resize device vector grad_alpha_ = grad_alpha; new_beta_ = new_beta; dev_grad_alpha_.resize(num_topics_ * block_cnt_); dev_new_beta_.resize(num_topics_ * num_words_); // copy to device thrust::copy(grad_alpha_, grad_alpha_ + block_cnt_ * num_topics_, dev_grad_alpha_.begin()); thrust::copy(new_beta_, new_beta_ + num_words_ * num_topics_, dev_new_beta_.begin()); // set locks dev_locks_.resize(num_words_); std::vector<int> host_locks(num_words_, 0); thrust::copy(host_locks.begin(), host_locks.end(), dev_locks_.begin()); CHECK_CUDA(cudaDeviceSynchronize()); } std::pair<float, float> CuLDA::FeedData( const int* cols, const int* indptr, const bool* vali, const float* counts, float* gamma, const bool init_gamma, const int num_cols, const int num_indptr, const int num_iters) { // copy feed data to GPU memory thrust::device_vector<int> dev_cols(num_cols); thrust::device_vector<int> dev_indptr(num_indptr + 1); thrust::device_vector<bool> dev_vali(num_cols); thrust::device_vector<float> dev_counts(num_cols); thrust::device_vector<float> dev_gamma(num_indptr * num_topics_); thrust::device_vector<float> dev_train_losses(block_cnt_, 0.0f); thrust::device_vector<float> dev_vali_losses(block_cnt_, 0.0f); thrust::copy(cols, cols + num_cols, dev_cols.begin()); thrust::copy(indptr, indptr + num_indptr + 1, dev_indptr.begin()); thrust::copy(vali, vali + num_cols, dev_vali.begin()); thrust::copy(counts, counts + num_cols, dev_counts.begin()); thrust::copy(gamma, gamma + num_indptr * num_topics_, dev_gamma.begin()); CHECK_CUDA(cudaDeviceSynchronize()); DEBUG0("copy feed data to GPU memory"); // run E step in GPU EstepKernel<<<block_cnt_, block_dim_, 4 * num_topics_ * sizeof(float)>>>( thrust::raw_pointer_cast(dev_cols.data()), thrust::raw_pointer_cast(dev_indptr.data()), 
thrust::raw_pointer_cast(dev_vali.data()), thrust::raw_pointer_cast(dev_counts.data()), init_gamma, num_cols, num_indptr, num_topics_, num_iters, thrust::raw_pointer_cast(dev_alpha_.data()), thrust::raw_pointer_cast(dev_beta_.data()), thrust::raw_pointer_cast(dev_gamma.data()), thrust::raw_pointer_cast(dev_grad_alpha_.data()), thrust::raw_pointer_cast(dev_new_beta_.data()), thrust::raw_pointer_cast(dev_train_losses.data()), thrust::raw_pointer_cast(dev_vali_losses.data()), thrust::raw_pointer_cast(dev_locks_.data())); CHECK_CUDA(cudaDeviceSynchronize()); DEBUG0("run E step in GPU"); // pull loss std::vector<float> train_losses(block_cnt_), vali_losses(block_cnt_); thrust::copy(dev_train_losses.begin(), dev_train_losses.end(), train_losses.begin()); thrust::copy(dev_vali_losses.begin(), dev_vali_losses.end(), vali_losses.begin()); thrust::copy(dev_gamma.begin(), dev_gamma.end(), gamma); CHECK_CUDA(cudaDeviceSynchronize()); DEBUG0("pull loss values"); // accumulate float train_loss = std::accumulate(train_losses.begin(), train_losses.end(), 0.0f); float vali_loss = std::accumulate(vali_losses.begin(), vali_losses.end(), 0.0f); return {train_loss, vali_loss}; } void CuLDA::Pull() { thrust::copy(dev_grad_alpha_.begin(), dev_grad_alpha_.end(), grad_alpha_); thrust::copy(dev_new_beta_.begin(), dev_new_beta_.end(), new_beta_); CHECK_CUDA(cudaDeviceSynchronize()); } void CuLDA::Push() { thrust::copy(alpha_, alpha_ + num_topics_, dev_alpha_.begin()); thrust::copy(grad_alpha_, grad_alpha_ + block_cnt_ * num_topics_, dev_grad_alpha_.begin()); thrust::copy(beta_, beta_ + num_words_ * num_topics_, dev_beta_.begin()); thrust::copy(new_beta_, new_beta_ + num_words_ * num_topics_, dev_new_beta_.begin()); CHECK_CUDA(cudaDeviceSynchronize()); } int CuLDA::GetBlockCnt() { return block_cnt_; } } // namespace cusim
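The E-step kernel writes one partial loss per block into dev_train_losses / dev_vali_losses, and FeedData finishes the reduction on the host with std::accumulate. A minimal sketch of that per-block partial-sum pattern follows; partial_sum_kernel and device_sum are illustrative names, not from the file.

#include <cuda_runtime.h>
#include <numeric>
#include <vector>

// Each block accumulates its share of the data into exactly one output slot,
// so the host can finish the reduction with std::accumulate.
__global__ void partial_sum_kernel(const float* data, int n, float* block_sums) {
  float acc = 0.f;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += gridDim.x * blockDim.x)
    acc += data[i];
  atomicAdd(&block_sums[blockIdx.x], acc);  // naive intra-block combine
}

float device_sum(const float* d_data, int n, int block_cnt, int block_dim) {
  std::vector<float> host_sums(block_cnt, 0.f);
  float* d_sums = nullptr;
  cudaMalloc(&d_sums, block_cnt * sizeof(float));
  cudaMemset(d_sums, 0, block_cnt * sizeof(float));
  partial_sum_kernel<<<block_cnt, block_dim>>>(d_data, n, d_sums);
  cudaMemcpy(host_sums.data(), d_sums, block_cnt * sizeof(float),
             cudaMemcpyDeviceToHost);
  cudaFree(d_sums);
  return std::accumulate(host_sums.begin(), host_sums.end(), 0.0f);
}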
0867ed075f894d8ddb909d41dabdf8a7aeb5e0bb.hip
// !!! This is a file automatically generated by hipify!!! #include <windows.h> #include <GL/glew.h> #include <GL/freeglut.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> //#include <cutil_inline.h> //#include "datamanager.h" #include <math.h> //#include <cutil_math.h> #include <hip/hip_runtime_api.h> #include <cuda_gl_interop.h> #include <thrust/count.h> #include <thrust/device_vector.h> #include <thrust/remove.h> #include <thrust/copy.h> //#include <hip/hip_runtime.h> //#include <hiprand/hiprand_kernel.h> #include "Node.h" #include <iostream> #include "KDTree.h" #include "glm.h" using namespace std; #define PARAMETER_SIZE 40 #define INF 100000 #define EPSILON 0.0000001 #define PI 3.14159265 #define MAX_FLOAT 10000000.0f #define PHON 30 #define AMBIENT 0.2 #define SPECULAR 0.4 #define DIFFUSE 0.4 #define RGB_R 1.0 #define RGB_G 1.0 #define RGB_B 1.0 unsigned int* gpu_face; float* gpu_vertice; int* gpu_leafNodeIndex; KDLeafNode* gpu_leafNode; KDInsideNode* gpu_insideNode; GLubyte* gpu_color; int d_numFace; int d_numVertex; int d_width; int d_height; int d_sampleSize; int d_blockSize; float3* gpu_sec_pos; float3* gpu_sec_posDouble; float3* gpu_sec_normal; float3* gpu_sec_normalDouble; int* gpu_sec_index; int* gpu_sec_indexDouble; int d_numActiveRay; GLubyte* gpu_Scolor; float* gpu_parameter; float* gpu_rand; bool d_lightIn; bool d_secondary; struct is_filled { __host__ __device__ bool operator() (const int x) { return x != -1; } }; __device__ float Dot(Point& a, Point& b) { return (a.x*b.x + a.y*b.y + a.z*b.z); } __device__ Point CrossProduct(Point& a, Point& b) { Point ret; ret.x = a.y * b.z - a.z * b.y; ret.y = a.z * b.x - a.x * b.z; ret.z = a.x * b.y - a.y * b.x; return ret; } __device__ void Normalize(Point& vector) { float v=sqrtf(vector.x*vector.x+vector.y*vector.y+vector.z*vector.z); v =1.0f/v; vector.x *= v; vector.y *= v; vector.z *= v; } __device__ Point Point_minus(Point& p1, Point& p2) { Point ret; ret.x = p1.x - p2.x; ret.y = p1.y - p2.y; ret.z = p1.z - p2.z; return ret; } __device__ Point multi(Point& p1, Point& p2) { Point ret; ret.x = p1.x*p2.x; ret.y = p1.y*p2.y; ret.z = p1.z*p2.z; return ret; } __device__ Point make(float a, float b, float c) { Point ret; ret.x = a; ret.y = b; ret.z= c; return ret; } __device__ Point add(Point& p1, Point& p2) { Point ret; ret.x = p1.x+p2.x; ret.y = p1.y+p2.y; ret.z = p1.z+p2.z; return ret; } __device__ bool intersectionTriangle(Ray& ray, Point& p1, Point& p2, Point& p3, Point& normal, float& t) { float det, inv_det, u, v; Point e1 = Point_minus(p2, p1); Point e2 = Point_minus(p3, p1); Point pvec = CrossProduct(ray.dir, e2); normal = CrossProduct(e1, e2); Normalize(normal); det = Dot(e1, pvec); if(det < EPSILON && det > -EPSILON) return false; inv_det = 1.0f/det; Point tvec = Point_minus(ray.pos, p1); u = Dot(tvec, pvec)*inv_det; if( u <0.0f || u >1.0f) return false; Point qvec = CrossProduct(tvec, e1); v = Dot(ray.dir, qvec)*inv_det; if( v<0.0f || (u+v)>1.0f) return false; t = Dot(e2, qvec)*inv_det; if( t > 0.000001) return true; return false; } __device__ bool intersectLeaf(Ray& ray, int leaf, float* d_vertice, unsigned int * d_face, KDLeafNode* d_leafNode, int* d_leafNodeIndex,Point& d_normal, float& minDis) { if(leaf==-1) return false; int start = d_leafNode[leaf].begin; int end = d_leafNode[leaf].begin+d_leafNode[leaf].numObject; Point normal; int minObj = -1; float t; for(int m=start; m<end; m++) { Point PT1,PT2,PT3; PT1.x = d_vertice[d_face[d_leafNodeIndex[m]*3]*3]; PT1.y = 
d_vertice[d_face[d_leafNodeIndex[m]*3]*3+1]; PT1.z = d_vertice[d_face[d_leafNodeIndex[m]*3]*3+2]; PT2.x = d_vertice[d_face[d_leafNodeIndex[m]*3+1]*3]; PT2.y = d_vertice[d_face[d_leafNodeIndex[m]*3+1]*3+1]; PT2.z = d_vertice[d_face[d_leafNodeIndex[m]*3+1]*3+2]; PT3.x = d_vertice[d_face[d_leafNodeIndex[m]*3+2]*3]; PT3.y = d_vertice[d_face[d_leafNodeIndex[m]*3+2]*3+1]; PT3.z = d_vertice[d_face[d_leafNodeIndex[m]*3+2]*3+2]; if(intersectionTriangle(ray,PT1,PT2,PT3,normal,t)) { if(t < minDis) { minObj = m; minDis = t; d_normal.x = normal.x; d_normal.y = normal.y; d_normal.z = normal.z; } } } if(minObj == -1) return false; else return true; } //intersectBox method __device__ bool intersectBox(Ray& r, Point& boxmin, Point& boxmax, float &tnear, float &tfar) { Point invR ;//= make_Point(1.0f) / r.dir; invR.x = 1.0/r.dir.x; invR.y = 1.0/r.dir.y; invR.z = 1.0/r.dir.z; Point tbot = multi(invR , Point_minus(boxmin , r.pos)); Point ttop = multi(invR , Point_minus(boxmax , r.pos)); // re-order intersections to find smallest and largest on each axis Point tmin = make(min(ttop.x, tbot.x),min(ttop.y, tbot.y),min(ttop.z, tbot.z)); Point tmax = make(max(ttop.x, tbot.x),max(ttop.y, tbot.y),max(ttop.z, tbot.z)); // find the largest tmin and the smallest tmax float largest_tmin = max(max(tmin.x, tmin.y), max(tmin.x, tmin.z)); float smallest_tmax = min(min(tmax.x, tmax.y), min(tmax.x, tmax.z)); tnear = largest_tmin; tfar = smallest_tmax; return smallest_tmax > largest_tmin; } __device__ bool traverse(Ray& ray, KDInsideNode* d_insideNode, float* vertice, unsigned int* face, int* leafNodeIndex, KDLeafNode* leafNode, Point& normal, float& minDis) { int cur_index =0; float tnear,tfar; Point intersectionPoint; float intersectionvalue =0; Normalize(ray.dir); Stack stack[12]; int capacity = 0; int result = -1; float distance = MAX_FLOAT; Point tmp_normal; stack[capacity].index =0; stack[capacity].leafNode = false; capacity++; while(capacity>0) { capacity--; while(stack[capacity].leafNode&&capacity>=0)//leaf node intersection test { if(intersectLeaf(ray, stack[capacity].index, vertice, face, leafNode, leafNodeIndex,tmp_normal, distance)) { result = capacity; minDis = distance; normal.x = tmp_normal.x; normal.y = tmp_normal.y; normal.z = tmp_normal.z; } capacity--; if(capacity == 0) { if(result != -1) return true; else return false; } //continue; } if(!stack[capacity].leafNode)// interal node { cur_index = stack[capacity].index; if(intersectBox(ray, d_insideNode[cur_index].aabb.minPoint, d_insideNode[cur_index].aabb.maxPoint, tnear, tfar)) { intersectionPoint.x = ray.pos.x + tnear*ray.dir.x; intersectionPoint.y = ray.pos.y + tnear*ray.dir.y; intersectionPoint.z = ray.pos.z + tnear*ray.dir.z; switch(d_insideNode[cur_index].splitAxis) { case Axis_X: intersectionvalue = intersectionPoint.x; break; case Axis_Y: intersectionvalue = intersectionPoint.y; break; case Axis_Z: intersectionvalue = intersectionPoint.z; break; } if(intersectionvalue < d_insideNode[cur_index].splitValue) { // left part if(d_insideNode[cur_index].right!=-1) { if(d_insideNode[cur_index].RightLeaf) stack[capacity].leafNode = true; else stack[capacity].leafNode = false; stack[capacity].index = d_insideNode[cur_index].right; capacity++; } if(d_insideNode[cur_index].left!=-1) { if(d_insideNode[cur_index].LeftLeaf) stack[capacity].leafNode = true; else stack[capacity].leafNode = false; stack[capacity].index = d_insideNode[cur_index].left; capacity++; } } else { // right part if(d_insideNode[cur_index].left!=-1) { if(d_insideNode[cur_index].LeftLeaf) 
stack[capacity].leafNode = true; else stack[capacity].leafNode = false; stack[capacity].index = d_insideNode[cur_index].left; capacity++; } if(d_insideNode[cur_index].right!=-1) { if(d_insideNode[cur_index].RightLeaf) stack[capacity].leafNode = true; else stack[capacity].leafNode = false; stack[capacity].index = d_insideNode[cur_index].right; capacity++; } } } } } if(result == -1) return false; else return true; } __device__ void getRotationMatrix(Point& pa, Point& pb, float* matrix) { float cos_h, sin_h; Normalize(pa); Normalize(pb); cos_h = Dot(pa, pb); sin_h = sqrtf(1-cos_h*cos_h)*(-1.0); Point tmp; tmp.x = pa.y*pb.z - pa.z*pb.y; tmp.y = pa.z*pb.x - pa.x*pb.z; tmp.z = pa.x*pb.y - pa.y*pb.x; matrix[0] = tmp.x*tmp.x*(1.0f-cos_h) +cos_h; matrix[1] = tmp.x*tmp.y*(1.0f-cos_h) +sin_h*tmp.z; matrix[2] = tmp.x*tmp.z*(1.0f-cos_h) +sin_h*tmp.y; matrix[3] = tmp.x*tmp.y*(1.0f-cos_h) +sin_h*tmp.z; matrix[4] = tmp.y*tmp.y*(1.0f-cos_h) +cos_h; matrix[5] = tmp.y*tmp.z*(1.0f-cos_h) -sin_h*tmp.x; matrix[6] = tmp.x*tmp.z*(1.0f-cos_h) +sin_h*tmp.y; matrix[7] = tmp.z*tmp.y*(1.0f-cos_h) +sin_h*tmp.x; matrix[8] = tmp.z*tmp.z*(1.0f-cos_h) +cos_h; } __device__ void getSampleRayDir(float* gpu_rand, Point& mainDir, float* rayDir,int ray_index, int numActiveRay, int width, int height, int sampleSize) { //random value generation float random_1 = gpu_rand[ray_index]; float random_2 = gpu_rand[ray_index + width*height*sampleSize]; random_1 = random_1*2.0f*PI; random_2 = acosf(sqrtf(random_2)); Point random_ray; random_ray.x = sinf(random_1)*cosf(random_2); random_ray.y = sinf(random_1)*sinf(random_2); random_ray.z = cosf(random_2); float matrix[9]; Point normalize_dir; normalize_dir.x = 0.0f; normalize_dir.y = 0.0f; normalize_dir.z = 1.0f; getRotationMatrix(mainDir, normalize_dir, matrix); rayDir[0] = random_ray.x*matrix[0] + random_ray.y*matrix[1] + random_ray.z*matrix[2]; rayDir[1] = random_ray.x*matrix[3] + random_ray.y*matrix[4] + random_ray.z*matrix[5]; rayDir[2] = random_ray.x*matrix[6] + random_ray.y*matrix[7] + random_ray.z*matrix[8]; } __global__ void GenerateSecondImage(bool secondary, int numActiveRay,int* gpu_sec_index, unsigned char* gpu_color, unsigned char* s_color, int sampleSize) { int index = blockIdx.x*blockDim.x + threadIdx.x; if(index >= numActiveRay) return; int node_index = gpu_sec_index[index]; uint3 tmp_color; tmp_color.x = 0; tmp_color.y = 0; tmp_color.z = 0; #pragma unroll 32 for(int i = 0; i< sampleSize; i++) { tmp_color.x += s_color[3*(i + index*sampleSize) + 0]; tmp_color.y += s_color[3*(i + index*sampleSize) + 1]; tmp_color.z += s_color[3*(i + index*sampleSize) + 2]; } if(secondary == true) { gpu_color[node_index*3+2] = (unsigned char)(tmp_color.x/sampleSize); gpu_color[node_index*3+1] = (unsigned char)(tmp_color.y/sampleSize); gpu_color[node_index*3+0] = (unsigned char)(tmp_color.z/sampleSize); } else { gpu_color[node_index*3+2] += (unsigned char)(tmp_color.x/sampleSize); gpu_color[node_index*3+1] += (unsigned char)(tmp_color.y/sampleSize); gpu_color[node_index*3+0] += (unsigned char)(tmp_color.z/sampleSize); } } __global__ void SecondRender(float* gpu_rand, int numActiveRay, int sampleSize, int width, int height, float* parameter, float* gpu_vertice, unsigned int* gpu_face, int* gpu_index, KDInsideNode* gpu_insideNode, KDLeafNode* gpu_leafNode, float3* gpu_sec_node, float3* gpu_sec_normal, int* gpu_sec_index, unsigned char* s_color) { int index = blockIdx.x*blockDim.x + threadIdx.x; if(index >= sampleSize*numActiveRay) return; s_color[3*index +0] = 0; s_color[3*index +1] = 0; 
s_color[3*index +2] = 0; int idx = index/sampleSize; Point normal; normal.x = gpu_sec_normal[idx].x; normal.y = gpu_sec_normal[idx].y; normal.z = gpu_sec_normal[idx].z; Ray ray; ray.pos.x = gpu_sec_node[idx].x; ray.pos.y = gpu_sec_node[idx].y; ray.pos.z = gpu_sec_node[idx].z; Point eye; eye.x = parameter[9] -ray.pos.x ; eye.y = parameter[10] -ray.pos.y; eye.z = parameter[11] -ray.pos.z; Normalize(eye); float cos_ray = Dot(eye, normal); if(cos_ray <0.0f) return; Point main_dir; main_dir.x = 2.0f*cos_ray*normal.x - eye.x; main_dir.y = 2.0f*cos_ray*normal.y - eye.y; main_dir.z = 2.0f*cos_ray*normal.z - eye.z; float dir[3]; getSampleRayDir(gpu_rand, main_dir, dir ,index, numActiveRay, width, height,sampleSize); ray.dir.x = dir[0]; ray.dir.y = dir[1]; ray.dir.z = dir[2]; Normalize(ray.dir); float min_dis = MAX_FLOAT; if(traverse(ray, gpu_insideNode, gpu_vertice, gpu_face,gpu_index, gpu_leafNode, normal, min_dis)) { s_color[3*index +0] = 128; s_color[3*index +1] = 128; s_color[3*index +2] = 128; } } __global__ void RenderTracer(bool lightIn, int numFace, int numNode, int gpu_width, int gpu_height, float* parameter, float* gpu_vertice, unsigned int* gpu_face, int* gpu_index, KDInsideNode* gpu_insideNode, KDLeafNode* gpu_leafNode,unsigned char* gpu_color, float3* gpu_sec_pos, float3* gpu_sec_normal, int* gpu_sec_index) { //parameter [0-8]: rotation matrix, [9-17]: camera_location, direction, lookat, [18-23] lightPos light col, may be more lights int index = blockIdx.x*blockDim.x + threadIdx.x; if(index<gpu_width*gpu_height) { gpu_sec_index[index] = -1; gpu_sec_pos[index].x = MAX_FLOAT; gpu_sec_pos[index].y = MAX_FLOAT; gpu_sec_pos[index].z = MAX_FLOAT; gpu_sec_normal[index].x = MAX_FLOAT; gpu_sec_normal[index].y = MAX_FLOAT; gpu_sec_normal[index].z = MAX_FLOAT; Point diffuse; Point ambient; Point specular; Point color; Point intersectionPoint; Point normal; float dif; Light light; light.pos.x = parameter[18]; light.pos.y = parameter[19]; light.pos.z = parameter[20]; light.col.x = parameter[21]; light.col.y = parameter[22]; light.col.z = parameter[23]; Ray ray; ray.pos.x = parameter[9]; ray.pos.y = parameter[10]; ray.pos.z = parameter[11]; float id_x, id_y; unsigned int _width, _height; _height = index/gpu_width; _width = index%gpu_width; id_x = (float)_width/gpu_height - 0.5f*(float)gpu_width/gpu_height; id_y = (float)_height/gpu_height -0.5f; ambient.x = id_x; ambient.y = -id_y; ambient.z = -1.0f; Normalize(ambient); ray.dir.x = ambient.x*parameter[0] + ambient.y*parameter[1] + ambient.z*parameter[2]; ray.dir.y = ambient.x*parameter[3] + ambient.y*parameter[4] + ambient.z*parameter[5]; ray.dir.z = ambient.x*parameter[6] + ambient.y*parameter[7] + ambient.z*parameter[8]; Normalize(ray.dir); ////////////////////////////////// //traverse this tree get the intersection point and normal ambient.x = ambient.y = ambient.z =0.0; diffuse.x = diffuse.y = diffuse.z =0.0; specular.x = specular.y = specular.z =0.0; float min_dis = MAX_FLOAT; if(traverse(ray, gpu_insideNode, gpu_vertice, gpu_face,gpu_index, gpu_leafNode, normal, min_dis)) { intersectionPoint.x = ray.pos.x + min_dis*ray.dir.x; intersectionPoint.y = ray.pos.y + min_dis*ray.dir.y; intersectionPoint.z = ray.pos.z + min_dis*ray.dir.z; gpu_sec_index[index] = index; gpu_sec_pos[index].x = intersectionPoint.x; gpu_sec_pos[index].y = intersectionPoint.y; gpu_sec_pos[index].z = intersectionPoint.z; gpu_sec_normal[index].x = normal.x; gpu_sec_normal[index].y = normal.y; gpu_sec_normal[index].z = normal.z; Point p; p.x = light.pos.x -intersectionPoint.x; 
p.y = light.pos.y -intersectionPoint.y; p.z = light.pos.z -intersectionPoint.z; Normalize(p); dif =Dot(p,normal); if(dif>0) { Ray rayEye; rayEye.pos.x = intersectionPoint.x; rayEye.pos.y = intersectionPoint.y; rayEye.pos.z = intersectionPoint.z; rayEye.dir.x = light.pos.x -intersectionPoint.x; rayEye.dir.y = light.pos.y -intersectionPoint.y; rayEye.dir.z = light.pos.z -intersectionPoint.z ; Normalize(rayEye.dir); if(lightIn != false) { diffuse.x = DIFFUSE*dif*RGB_R; diffuse.y = DIFFUSE*dif*RGB_G; diffuse.z = DIFFUSE*dif*RGB_B; } else if(traverse(rayEye, gpu_insideNode, gpu_vertice, gpu_face,gpu_index, gpu_leafNode, normal, min_dis) == false)//shadows and occluused { diffuse.x = DIFFUSE*dif*RGB_R; diffuse.y = DIFFUSE*dif*RGB_G; diffuse.z = DIFFUSE*dif*RGB_B; } } } color.x = diffuse.x + ambient.x + specular.x; color.y = diffuse.y + ambient.y + specular.y; color.z = diffuse.z + ambient.z + specular.z; if(color.x >1.0) color.x =1.0; if(color.y >1.0) color.y =1.0; if(color.z >1.0) color.z =1.0; gpu_color[2+3*index] =(unsigned char)(color.x*255); gpu_color[1+3*index] =(unsigned char)(color.y*255); gpu_color[0+3*index] =(unsigned char)(color.z*255); } } __global__ void RenderKDTree(bool lightIn, int numFace, int numNode, int gpu_width, int gpu_height, float* parameter, float* gpu_vertice, unsigned int* gpu_face, int* gpu_index, KDInsideNode* gpu_insideNode, KDLeafNode* gpu_leafNode,unsigned char* gpu_color, float3* gpu_sec_pos, float3* gpu_sec_normal, int* gpu_sec_index) { //parameter [0-8]: rotation matrix, [9-17]: camera_location, direction, lookat, [18-23] lightPos light col, may be more lights int index = blockIdx.x*blockDim.x + threadIdx.x; if(index<gpu_width*gpu_height) { gpu_sec_index[index] = -1; gpu_sec_pos[index].x = MAX_FLOAT; gpu_sec_pos[index].y = MAX_FLOAT; gpu_sec_pos[index].z = MAX_FLOAT; gpu_sec_normal[index].x = MAX_FLOAT; gpu_sec_normal[index].y = MAX_FLOAT; gpu_sec_normal[index].z = MAX_FLOAT; Point diffuse; Point ambient; Point specular; Point color; Point intersectionPoint; Point normal; float dif; float cos_data; Point reflect; Point rayFromEye; Light light; light.pos.x = parameter[18]; light.pos.y = parameter[19]; light.pos.z = parameter[20]; light.col.x = parameter[21]; light.col.y = parameter[22]; light.col.z = parameter[23]; Ray ray; ray.pos.x = parameter[9]; ray.pos.y = parameter[10]; ray.pos.z = parameter[11]; float id_x, id_y; unsigned int _width, _height; _height = index/gpu_width; _width = index%gpu_width; id_x = (float)_width/gpu_height - 0.5f*(float)gpu_width/gpu_height; id_y = (float)_height/gpu_height -0.5f; ambient.x = id_x; ambient.y = -id_y; ambient.z = -1.0f; Normalize(ambient); ray.dir.x = ambient.x*parameter[0] + ambient.y*parameter[1] + ambient.z*parameter[2]; ray.dir.y = ambient.x*parameter[3] + ambient.y*parameter[4] + ambient.z*parameter[5]; ray.dir.z = ambient.x*parameter[6] + ambient.y*parameter[7] + ambient.z*parameter[8]; Normalize(ray.dir); ////////////////////////////////// //traverse this tree get the intersection point and normal ambient.x = ambient.y = ambient.z =0.0; diffuse.x = diffuse.y = diffuse.z =0.0; specular.x = specular.y = specular.z =0.0; float min_dis = MAX_FLOAT; if(traverse(ray, gpu_insideNode, gpu_vertice, gpu_face,gpu_index, gpu_leafNode, normal, min_dis)) { intersectionPoint.x = ray.pos.x + min_dis*ray.dir.x; intersectionPoint.y = ray.pos.y + min_dis*ray.dir.y; intersectionPoint.z = ray.pos.z + min_dis*ray.dir.z; gpu_sec_index[index] = index; gpu_sec_pos[index].x = intersectionPoint.x; gpu_sec_pos[index].y = 
intersectionPoint.y; gpu_sec_pos[index].z = intersectionPoint.z; gpu_sec_normal[index].x = normal.x; gpu_sec_normal[index].y = normal.y; gpu_sec_normal[index].z = normal.z; ambient.x = AMBIENT*RGB_R; ambient.y = AMBIENT*RGB_G; ambient.z = AMBIENT*RGB_B; Point p; p.x = light.pos.x -intersectionPoint.x; p.y = light.pos.y -intersectionPoint.y; p.z = light.pos.z -intersectionPoint.z; Normalize(p); dif =Dot(p,normal); if(dif>0) { Ray rayEye; rayEye.pos.x = intersectionPoint.x; rayEye.pos.y = intersectionPoint.y; rayEye.pos.z = intersectionPoint.z; rayEye.dir.x = light.pos.x -intersectionPoint.x; rayEye.dir.y = light.pos.y -intersectionPoint.y; rayEye.dir.z = light.pos.z -intersectionPoint.z ; Normalize(rayEye.dir); //specular light calculation reflect.x = normal.x*2*dif-p.x; reflect.y = normal.y*2*dif-p.y; reflect.z = normal.z*2*dif-p.z; Normalize(reflect); rayFromEye.x = ray.pos.x - intersectionPoint.x; rayFromEye.y = ray.pos.y - intersectionPoint.y; rayFromEye.z = ray.pos.z - intersectionPoint.z; Normalize(rayFromEye); cos_data = Dot(reflect, rayFromEye); if(lightIn != false)//shadows and occluused { diffuse.x = DIFFUSE*dif*RGB_R; diffuse.y = DIFFUSE*dif*RGB_G; diffuse.z = DIFFUSE*dif*RGB_B; if(cos_data>0) { cos_data = powf(cos_data,PHON); specular.x = light.col.x*cos_data*SPECULAR; specular.y = light.col.y*cos_data*SPECULAR; specular.z = light.col.z*cos_data*SPECULAR; } } else if(traverse(rayEye, gpu_insideNode, gpu_vertice, gpu_face,gpu_index, gpu_leafNode, normal, min_dis) == false) { diffuse.x = DIFFUSE*dif*RGB_R; diffuse.y = DIFFUSE*dif*RGB_G; diffuse.z = DIFFUSE*dif*RGB_B; if(cos_data>0) { cos_data = powf(cos_data,PHON); specular.x = light.col.x*cos_data*SPECULAR; specular.y = light.col.y*cos_data*SPECULAR; specular.z = light.col.z*cos_data*SPECULAR; } } } } color.x = diffuse.x + ambient.x + specular.x; color.y = diffuse.y + ambient.y + specular.y; color.z = diffuse.z + ambient.z + specular.z; if(color.x >1.0) color.x =1.0; if(color.y >1.0) color.y =1.0; if(color.z >1.0) color.z =1.0; gpu_color[2+3*index] =(unsigned char)(color.x*255); gpu_color[1+3*index] =(unsigned char)(color.y*255); gpu_color[0+3*index] =(unsigned char)(color.z*255); } } extern "C" void RayCasting(float* matrix, GLubyte* pixelData) { hipMemcpy(gpu_parameter, matrix, sizeof(float)*PARAMETER_SIZE, hipMemcpyHostToDevice); int blockSize = d_blockSize; int blocks = d_width*d_height/blockSize; if( (d_width*d_height)%blockSize !=0) blocks++; hipLaunchKernelGGL(( RenderKDTree), dim3(blocks),dim3(blockSize), 0, 0, d_lightIn, d_numFace, d_numVertex, d_width, d_height, gpu_parameter,gpu_vertice, gpu_face, gpu_leafNodeIndex, gpu_insideNode, gpu_leafNode,gpu_color, gpu_sec_pos, gpu_sec_normal, gpu_sec_index); //cutilDeviceSynchronize(); hipDeviceSynchronize(); hipMemcpy(pixelData, gpu_color, sizeof( GLubyte )*(d_width*d_height*3), hipMemcpyDeviceToHost); } extern "C" void RayTracing(float* matrix, GLubyte* pixelData) { hipMemcpy(gpu_parameter, matrix, sizeof(float)*PARAMETER_SIZE, hipMemcpyHostToDevice); int blockSize = d_blockSize; int blocks = d_width*d_height/blockSize; if( (d_width*d_height)%blockSize !=0) blocks++; hipLaunchKernelGGL(( RenderTracer), dim3(blocks),dim3(blockSize), 0, 0, d_lightIn, d_numFace, d_numVertex, d_width, d_height, gpu_parameter,gpu_vertice, gpu_face, gpu_leafNodeIndex, gpu_insideNode, gpu_leafNode,gpu_color, gpu_sec_pos, gpu_sec_normal, gpu_sec_index); //cutilDeviceSynchronize(); hipDeviceSynchronize(); //compact the secondary ray thrust::device_ptr<int> index_ptr(gpu_sec_index); 
thrust::device_ptr<int> indexDouble_ptr(gpu_sec_indexDouble); hipMemcpy(gpu_sec_indexDouble, gpu_sec_index,sizeof(int)*d_width*d_height,hipMemcpyDeviceToDevice); thrust::device_ptr<float3> pos_ptr(gpu_sec_pos); thrust::device_ptr<float3> posDou_ptr(gpu_sec_posDouble); thrust::device_ptr<float3> normal_ptr(gpu_sec_normal); thrust::device_ptr<float3> normalDou_ptr(gpu_sec_normalDouble); thrust::copy_if(pos_ptr, pos_ptr+d_width*d_height,index_ptr, posDou_ptr, is_filled()); thrust::copy_if(normal_ptr, normal_ptr+d_width*d_height,index_ptr, normalDou_ptr, is_filled()); d_numActiveRay = thrust::count(index_ptr, index_ptr+d_width*d_height, -1); d_numActiveRay = d_width*d_height - d_numActiveRay; printf("result:%d\n",d_numActiveRay); thrust::remove_copy(indexDouble_ptr, indexDouble_ptr+d_width*d_height, index_ptr, -1); blocks = (d_numActiveRay*d_sampleSize)/blockSize; if( (d_numActiveRay*d_sampleSize)%blockSize != 0) blocks++; printf("blocks:%d, threads:%d\n", blocks, d_numActiveRay*d_sampleSize); hipDeviceSynchronize(); //cutilDeviceSynchronize(); hipLaunchKernelGGL(( SecondRender), dim3(blocks), dim3(blockSize), 0, 0, gpu_rand, d_numActiveRay, d_sampleSize, d_width, d_height ,gpu_parameter, gpu_vertice, gpu_face, gpu_leafNodeIndex, gpu_insideNode, gpu_leafNode, gpu_sec_posDouble, gpu_sec_normalDouble, gpu_sec_index, gpu_Scolor); //cutilDeviceSynchronize(); hipDeviceSynchronize(); blocks = (d_numActiveRay)/blockSize; if( (d_numActiveRay)%blockSize != 0) blocks++; hipLaunchKernelGGL(( GenerateSecondImage), dim3(blocks), dim3(blockSize), 0, 0, d_secondary, d_numActiveRay,gpu_sec_index, gpu_color, gpu_Scolor,d_sampleSize); hipDeviceSynchronize(); //cutilDeviceSynchronize(); hipMemcpy(pixelData, gpu_color, sizeof( GLubyte )*(d_width*d_height*3), hipMemcpyDeviceToHost); //cutilDeviceSynchronize(); hipDeviceSynchronize(); } extern "C" void Prepare_Data(bool secondary, bool lightIn, int blockSize, int sampleSize, int width, int height, GLMmodel* glm_model, KDTree& tree) { d_numFace = glm_model->numtriangles; d_numVertex = glm_model->numvertices; d_width = width; d_height = height; d_sampleSize = sampleSize; d_blockSize = blockSize; d_lightIn = lightIn; d_secondary = secondary; hipMalloc((void**)&gpu_rand, sizeof(float)*2*d_width*d_height*d_sampleSize); hipMalloc((void**)&gpu_Scolor, sizeof(GLubyte)*3*d_width*d_height*d_sampleSize); hipMalloc((void**)&gpu_parameter, sizeof(float)*PARAMETER_SIZE); hipMalloc((void**)&gpu_color, sizeof( GLubyte )*(d_width*d_height*3)); hipMalloc((void**)&gpu_face, sizeof( unsigned int )*(glm_model->numtriangles)*3); hipMalloc((void**)&gpu_vertice, sizeof( float )*(glm_model->numvertices+1)*3); hipMalloc((void**)&gpu_leafNode, sizeof(KDLeafNode)*(tree.numLeafNode)); hipMalloc((void**)&gpu_insideNode, sizeof(KDInsideNode)*(tree.numInsideNode)); hipMalloc((void**)&gpu_leafNodeIndex, sizeof(int)*(tree.SizeofLeafNode)); hipMalloc((void**)&gpu_sec_pos, sizeof(float3)*(d_width*d_height)); hipMalloc((void**)&gpu_sec_posDouble, sizeof(float3)*(d_width*d_height)); hipMalloc((void**)&gpu_sec_normal, sizeof(float3)*(d_width*d_height)); hipMalloc((void**)&gpu_sec_normalDouble, sizeof(float3)*(d_width*d_height)); hipMalloc((void**)&gpu_sec_index, sizeof(int)*(d_width*d_height)); hipMalloc((void**)&gpu_sec_indexDouble, sizeof(int)*(d_width*d_height)); hipMemcpy(gpu_face, glm_model->vindices, sizeof( unsigned int )*(glm_model->numtriangles)*3, hipMemcpyHostToDevice); hipMemcpy(gpu_vertice, glm_model->vertices, sizeof( float )*(glm_model->numvertices+1)*3, hipMemcpyHostToDevice); 
hipMemcpy(gpu_leafNode, tree.leafNode, sizeof(KDLeafNode)*(tree.numLeafNode), hipMemcpyHostToDevice); hipMemcpy(gpu_insideNode, tree.insideNode, sizeof(KDInsideNode)*(tree.numInsideNode), hipMemcpyHostToDevice); hipMemcpy(gpu_leafNodeIndex, tree.leafNodeIndex, sizeof(int)*(tree.SizeofLeafNode), hipMemcpyHostToDevice); float* tmp_random; tmp_random = (float*)malloc(sizeof(float)*2*d_width*d_height*d_sampleSize); for(int i = 0; i<d_width*d_height*d_sampleSize*2; i++) tmp_random[i] = ((float)rand()/RAND_MAX); hipMemcpy(gpu_rand, tmp_random, sizeof(float)*2*d_width*d_height*d_sampleSize, hipMemcpyHostToDevice); free(tmp_random); } extern "C" void Clean_Data() { hipFree(gpu_parameter); hipFree(gpu_vertice); hipFree(gpu_face); hipFree(gpu_leafNodeIndex); hipFree(gpu_insideNode); hipFree(gpu_leafNode); hipFree(gpu_color); hipFree(gpu_sec_pos); hipFree(gpu_sec_posDouble); hipFree(gpu_sec_normal); hipFree(gpu_sec_normalDouble); hipFree(gpu_sec_index); hipFree(gpu_sec_indexDouble); hipFree(gpu_Scolor); hipFree(gpu_rand); }
0867ed075f894d8ddb909d41dabdf8a7aeb5e0bb.cu
#include <windows.h> #include <GL/glew.h> #include <GL/freeglut.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> //#include <cutil_inline.h> //#include "datamanager.h" #include <math.h> //#include <cutil_math.h> #include <cuda_runtime_api.h> #include <cuda_gl_interop.h> #include <thrust/count.h> #include <thrust/device_vector.h> #include <thrust/remove.h> #include <thrust/copy.h> //#include <cuda.h> //#include <curand_kernel.h> #include "Node.h" #include <iostream> #include "KDTree.h" #include "glm.h" using namespace std; #define PARAMETER_SIZE 40 #define INF 100000 #define EPSILON 0.0000001 #define PI 3.14159265 #define MAX_FLOAT 10000000.0f #define PHON 30 #define AMBIENT 0.2 #define SPECULAR 0.4 #define DIFFUSE 0.4 #define RGB_R 1.0 #define RGB_G 1.0 #define RGB_B 1.0 unsigned int* gpu_face; float* gpu_vertice; int* gpu_leafNodeIndex; KDLeafNode* gpu_leafNode; KDInsideNode* gpu_insideNode; GLubyte* gpu_color; int d_numFace; int d_numVertex; int d_width; int d_height; int d_sampleSize; int d_blockSize; float3* gpu_sec_pos; float3* gpu_sec_posDouble; float3* gpu_sec_normal; float3* gpu_sec_normalDouble; int* gpu_sec_index; int* gpu_sec_indexDouble; int d_numActiveRay; GLubyte* gpu_Scolor; float* gpu_parameter; float* gpu_rand; bool d_lightIn; bool d_secondary; struct is_filled { __host__ __device__ bool operator() (const int x) { return x != -1; } }; __device__ float Dot(Point& a, Point& b) { return (a.x*b.x + a.y*b.y + a.z*b.z); } __device__ Point CrossProduct(Point& a, Point& b) { Point ret; ret.x = a.y * b.z - a.z * b.y; ret.y = a.z * b.x - a.x * b.z; ret.z = a.x * b.y - a.y * b.x; return ret; } __device__ void Normalize(Point& vector) { float v=sqrtf(vector.x*vector.x+vector.y*vector.y+vector.z*vector.z); v =1.0f/v; vector.x *= v; vector.y *= v; vector.z *= v; } __device__ Point Point_minus(Point& p1, Point& p2) { Point ret; ret.x = p1.x - p2.x; ret.y = p1.y - p2.y; ret.z = p1.z - p2.z; return ret; } __device__ Point multi(Point& p1, Point& p2) { Point ret; ret.x = p1.x*p2.x; ret.y = p1.y*p2.y; ret.z = p1.z*p2.z; return ret; } __device__ Point make(float a, float b, float c) { Point ret; ret.x = a; ret.y = b; ret.z= c; return ret; } __device__ Point add(Point& p1, Point& p2) { Point ret; ret.x = p1.x+p2.x; ret.y = p1.y+p2.y; ret.z = p1.z+p2.z; return ret; } __device__ bool intersectionTriangle(Ray& ray, Point& p1, Point& p2, Point& p3, Point& normal, float& t) { float det, inv_det, u, v; Point e1 = Point_minus(p2, p1); Point e2 = Point_minus(p3, p1); Point pvec = CrossProduct(ray.dir, e2); normal = CrossProduct(e1, e2); Normalize(normal); det = Dot(e1, pvec); if(det < EPSILON && det > -EPSILON) return false; inv_det = 1.0f/det; Point tvec = Point_minus(ray.pos, p1); u = Dot(tvec, pvec)*inv_det; if( u <0.0f || u >1.0f) return false; Point qvec = CrossProduct(tvec, e1); v = Dot(ray.dir, qvec)*inv_det; if( v<0.0f || (u+v)>1.0f) return false; t = Dot(e2, qvec)*inv_det; if( t > 0.000001) return true; return false; } __device__ bool intersectLeaf(Ray& ray, int leaf, float* d_vertice, unsigned int * d_face, KDLeafNode* d_leafNode, int* d_leafNodeIndex,Point& d_normal, float& minDis) { if(leaf==-1) return false; int start = d_leafNode[leaf].begin; int end = d_leafNode[leaf].begin+d_leafNode[leaf].numObject; Point normal; int minObj = -1; float t; for(int m=start; m<end; m++) { Point PT1,PT2,PT3; PT1.x = d_vertice[d_face[d_leafNodeIndex[m]*3]*3]; PT1.y = d_vertice[d_face[d_leafNodeIndex[m]*3]*3+1]; PT1.z = d_vertice[d_face[d_leafNodeIndex[m]*3]*3+2]; PT2.x = 
d_vertice[d_face[d_leafNodeIndex[m]*3+1]*3]; PT2.y = d_vertice[d_face[d_leafNodeIndex[m]*3+1]*3+1]; PT2.z = d_vertice[d_face[d_leafNodeIndex[m]*3+1]*3+2]; PT3.x = d_vertice[d_face[d_leafNodeIndex[m]*3+2]*3]; PT3.y = d_vertice[d_face[d_leafNodeIndex[m]*3+2]*3+1]; PT3.z = d_vertice[d_face[d_leafNodeIndex[m]*3+2]*3+2]; if(intersectionTriangle(ray,PT1,PT2,PT3,normal,t)) { if(t < minDis) { minObj = m; minDis = t; d_normal.x = normal.x; d_normal.y = normal.y; d_normal.z = normal.z; } } } if(minObj == -1) return false; else return true; } //intersectBox method __device__ bool intersectBox(Ray& r, Point& boxmin, Point& boxmax, float &tnear, float &tfar) { Point invR ;//= make_Point(1.0f) / r.dir; invR.x = 1.0/r.dir.x; invR.y = 1.0/r.dir.y; invR.z = 1.0/r.dir.z; Point tbot = multi(invR , Point_minus(boxmin , r.pos)); Point ttop = multi(invR , Point_minus(boxmax , r.pos)); // re-order intersections to find smallest and largest on each axis Point tmin = make(min(ttop.x, tbot.x),min(ttop.y, tbot.y),min(ttop.z, tbot.z)); Point tmax = make(max(ttop.x, tbot.x),max(ttop.y, tbot.y),max(ttop.z, tbot.z)); // find the largest tmin and the smallest tmax float largest_tmin = max(max(tmin.x, tmin.y), max(tmin.x, tmin.z)); float smallest_tmax = min(min(tmax.x, tmax.y), min(tmax.x, tmax.z)); tnear = largest_tmin; tfar = smallest_tmax; return smallest_tmax > largest_tmin; } __device__ bool traverse(Ray& ray, KDInsideNode* d_insideNode, float* vertice, unsigned int* face, int* leafNodeIndex, KDLeafNode* leafNode, Point& normal, float& minDis) { int cur_index =0; float tnear,tfar; Point intersectionPoint; float intersectionvalue =0; Normalize(ray.dir); Stack stack[12]; int capacity = 0; int result = -1; float distance = MAX_FLOAT; Point tmp_normal; stack[capacity].index =0; stack[capacity].leafNode = false; capacity++; while(capacity>0) { capacity--; while(stack[capacity].leafNode&&capacity>=0)//leaf node intersection test { if(intersectLeaf(ray, stack[capacity].index, vertice, face, leafNode, leafNodeIndex,tmp_normal, distance)) { result = capacity; minDis = distance; normal.x = tmp_normal.x; normal.y = tmp_normal.y; normal.z = tmp_normal.z; } capacity--; if(capacity == 0) { if(result != -1) return true; else return false; } //continue; } if(!stack[capacity].leafNode)// interal node { cur_index = stack[capacity].index; if(intersectBox(ray, d_insideNode[cur_index].aabb.minPoint, d_insideNode[cur_index].aabb.maxPoint, tnear, tfar)) { intersectionPoint.x = ray.pos.x + tnear*ray.dir.x; intersectionPoint.y = ray.pos.y + tnear*ray.dir.y; intersectionPoint.z = ray.pos.z + tnear*ray.dir.z; switch(d_insideNode[cur_index].splitAxis) { case Axis_X: intersectionvalue = intersectionPoint.x; break; case Axis_Y: intersectionvalue = intersectionPoint.y; break; case Axis_Z: intersectionvalue = intersectionPoint.z; break; } if(intersectionvalue < d_insideNode[cur_index].splitValue) { // left part if(d_insideNode[cur_index].right!=-1) { if(d_insideNode[cur_index].RightLeaf) stack[capacity].leafNode = true; else stack[capacity].leafNode = false; stack[capacity].index = d_insideNode[cur_index].right; capacity++; } if(d_insideNode[cur_index].left!=-1) { if(d_insideNode[cur_index].LeftLeaf) stack[capacity].leafNode = true; else stack[capacity].leafNode = false; stack[capacity].index = d_insideNode[cur_index].left; capacity++; } } else { // right part if(d_insideNode[cur_index].left!=-1) { if(d_insideNode[cur_index].LeftLeaf) stack[capacity].leafNode = true; else stack[capacity].leafNode = false; stack[capacity].index = 
d_insideNode[cur_index].left; capacity++; } if(d_insideNode[cur_index].right!=-1) { if(d_insideNode[cur_index].RightLeaf) stack[capacity].leafNode = true; else stack[capacity].leafNode = false; stack[capacity].index = d_insideNode[cur_index].right; capacity++; } } } } } if(result == -1) return false; else return true; } __device__ void getRotationMatrix(Point& pa, Point& pb, float* matrix) { float cos_h, sin_h; Normalize(pa); Normalize(pb); cos_h = Dot(pa, pb); sin_h = sqrtf(1-cos_h*cos_h)*(-1.0); Point tmp; tmp.x = pa.y*pb.z - pa.z*pb.y; tmp.y = pa.z*pb.x - pa.x*pb.z; tmp.z = pa.x*pb.y - pa.y*pb.x; matrix[0] = tmp.x*tmp.x*(1.0f-cos_h) +cos_h; matrix[1] = tmp.x*tmp.y*(1.0f-cos_h) +sin_h*tmp.z; matrix[2] = tmp.x*tmp.z*(1.0f-cos_h) +sin_h*tmp.y; matrix[3] = tmp.x*tmp.y*(1.0f-cos_h) +sin_h*tmp.z; matrix[4] = tmp.y*tmp.y*(1.0f-cos_h) +cos_h; matrix[5] = tmp.y*tmp.z*(1.0f-cos_h) -sin_h*tmp.x; matrix[6] = tmp.x*tmp.z*(1.0f-cos_h) +sin_h*tmp.y; matrix[7] = tmp.z*tmp.y*(1.0f-cos_h) +sin_h*tmp.x; matrix[8] = tmp.z*tmp.z*(1.0f-cos_h) +cos_h; } __device__ void getSampleRayDir(float* gpu_rand, Point& mainDir, float* rayDir,int ray_index, int numActiveRay, int width, int height, int sampleSize) { //random value generation float random_1 = gpu_rand[ray_index]; float random_2 = gpu_rand[ray_index + width*height*sampleSize]; random_1 = random_1*2.0f*PI; random_2 = acosf(sqrtf(random_2)); Point random_ray; random_ray.x = sinf(random_1)*cosf(random_2); random_ray.y = sinf(random_1)*sinf(random_2); random_ray.z = cosf(random_2); float matrix[9]; Point normalize_dir; normalize_dir.x = 0.0f; normalize_dir.y = 0.0f; normalize_dir.z = 1.0f; getRotationMatrix(mainDir, normalize_dir, matrix); rayDir[0] = random_ray.x*matrix[0] + random_ray.y*matrix[1] + random_ray.z*matrix[2]; rayDir[1] = random_ray.x*matrix[3] + random_ray.y*matrix[4] + random_ray.z*matrix[5]; rayDir[2] = random_ray.x*matrix[6] + random_ray.y*matrix[7] + random_ray.z*matrix[8]; } __global__ void GenerateSecondImage(bool secondary, int numActiveRay,int* gpu_sec_index, unsigned char* gpu_color, unsigned char* s_color, int sampleSize) { int index = blockIdx.x*blockDim.x + threadIdx.x; if(index >= numActiveRay) return; int node_index = gpu_sec_index[index]; uint3 tmp_color; tmp_color.x = 0; tmp_color.y = 0; tmp_color.z = 0; #pragma unroll 32 for(int i = 0; i< sampleSize; i++) { tmp_color.x += s_color[3*(i + index*sampleSize) + 0]; tmp_color.y += s_color[3*(i + index*sampleSize) + 1]; tmp_color.z += s_color[3*(i + index*sampleSize) + 2]; } if(secondary == true) { gpu_color[node_index*3+2] = (unsigned char)(tmp_color.x/sampleSize); gpu_color[node_index*3+1] = (unsigned char)(tmp_color.y/sampleSize); gpu_color[node_index*3+0] = (unsigned char)(tmp_color.z/sampleSize); } else { gpu_color[node_index*3+2] += (unsigned char)(tmp_color.x/sampleSize); gpu_color[node_index*3+1] += (unsigned char)(tmp_color.y/sampleSize); gpu_color[node_index*3+0] += (unsigned char)(tmp_color.z/sampleSize); } } __global__ void SecondRender(float* gpu_rand, int numActiveRay, int sampleSize, int width, int height, float* parameter, float* gpu_vertice, unsigned int* gpu_face, int* gpu_index, KDInsideNode* gpu_insideNode, KDLeafNode* gpu_leafNode, float3* gpu_sec_node, float3* gpu_sec_normal, int* gpu_sec_index, unsigned char* s_color) { int index = blockIdx.x*blockDim.x + threadIdx.x; if(index >= sampleSize*numActiveRay) return; s_color[3*index +0] = 0; s_color[3*index +1] = 0; s_color[3*index +2] = 0; int idx = index/sampleSize; Point normal; normal.x = gpu_sec_normal[idx].x; 
normal.y = gpu_sec_normal[idx].y; normal.z = gpu_sec_normal[idx].z; Ray ray; ray.pos.x = gpu_sec_node[idx].x; ray.pos.y = gpu_sec_node[idx].y; ray.pos.z = gpu_sec_node[idx].z; Point eye; eye.x = parameter[9] -ray.pos.x ; eye.y = parameter[10] -ray.pos.y; eye.z = parameter[11] -ray.pos.z; Normalize(eye); float cos_ray = Dot(eye, normal); if(cos_ray <0.0f) return; Point main_dir; main_dir.x = 2.0f*cos_ray*normal.x - eye.x; main_dir.y = 2.0f*cos_ray*normal.y - eye.y; main_dir.z = 2.0f*cos_ray*normal.z - eye.z; float dir[3]; getSampleRayDir(gpu_rand, main_dir, dir ,index, numActiveRay, width, height,sampleSize); ray.dir.x = dir[0]; ray.dir.y = dir[1]; ray.dir.z = dir[2]; Normalize(ray.dir); float min_dis = MAX_FLOAT; if(traverse(ray, gpu_insideNode, gpu_vertice, gpu_face,gpu_index, gpu_leafNode, normal, min_dis)) { s_color[3*index +0] = 128; s_color[3*index +1] = 128; s_color[3*index +2] = 128; } } __global__ void RenderTracer(bool lightIn, int numFace, int numNode, int gpu_width, int gpu_height, float* parameter, float* gpu_vertice, unsigned int* gpu_face, int* gpu_index, KDInsideNode* gpu_insideNode, KDLeafNode* gpu_leafNode,unsigned char* gpu_color, float3* gpu_sec_pos, float3* gpu_sec_normal, int* gpu_sec_index) { //parameter [0-8]: rotation matrix, [9-17]: camera_location, direction, lookat, [18-23] lightPos light col, may be more lights int index = blockIdx.x*blockDim.x + threadIdx.x; if(index<gpu_width*gpu_height) { gpu_sec_index[index] = -1; gpu_sec_pos[index].x = MAX_FLOAT; gpu_sec_pos[index].y = MAX_FLOAT; gpu_sec_pos[index].z = MAX_FLOAT; gpu_sec_normal[index].x = MAX_FLOAT; gpu_sec_normal[index].y = MAX_FLOAT; gpu_sec_normal[index].z = MAX_FLOAT; Point diffuse; Point ambient; Point specular; Point color; Point intersectionPoint; Point normal; float dif; Light light; light.pos.x = parameter[18]; light.pos.y = parameter[19]; light.pos.z = parameter[20]; light.col.x = parameter[21]; light.col.y = parameter[22]; light.col.z = parameter[23]; Ray ray; ray.pos.x = parameter[9]; ray.pos.y = parameter[10]; ray.pos.z = parameter[11]; float id_x, id_y; unsigned int _width, _height; _height = index/gpu_width; _width = index%gpu_width; id_x = (float)_width/gpu_height - 0.5f*(float)gpu_width/gpu_height; id_y = (float)_height/gpu_height -0.5f; ambient.x = id_x; ambient.y = -id_y; ambient.z = -1.0f; Normalize(ambient); ray.dir.x = ambient.x*parameter[0] + ambient.y*parameter[1] + ambient.z*parameter[2]; ray.dir.y = ambient.x*parameter[3] + ambient.y*parameter[4] + ambient.z*parameter[5]; ray.dir.z = ambient.x*parameter[6] + ambient.y*parameter[7] + ambient.z*parameter[8]; Normalize(ray.dir); ////////////////////////////////// //traverse this tree get the intersection point and normal ambient.x = ambient.y = ambient.z =0.0; diffuse.x = diffuse.y = diffuse.z =0.0; specular.x = specular.y = specular.z =0.0; float min_dis = MAX_FLOAT; if(traverse(ray, gpu_insideNode, gpu_vertice, gpu_face,gpu_index, gpu_leafNode, normal, min_dis)) { intersectionPoint.x = ray.pos.x + min_dis*ray.dir.x; intersectionPoint.y = ray.pos.y + min_dis*ray.dir.y; intersectionPoint.z = ray.pos.z + min_dis*ray.dir.z; gpu_sec_index[index] = index; gpu_sec_pos[index].x = intersectionPoint.x; gpu_sec_pos[index].y = intersectionPoint.y; gpu_sec_pos[index].z = intersectionPoint.z; gpu_sec_normal[index].x = normal.x; gpu_sec_normal[index].y = normal.y; gpu_sec_normal[index].z = normal.z; Point p; p.x = light.pos.x -intersectionPoint.x; p.y = light.pos.y -intersectionPoint.y; p.z = light.pos.z -intersectionPoint.z; Normalize(p); dif 
=Dot(p,normal); if(dif>0) { Ray rayEye; rayEye.pos.x = intersectionPoint.x; rayEye.pos.y = intersectionPoint.y; rayEye.pos.z = intersectionPoint.z; rayEye.dir.x = light.pos.x -intersectionPoint.x; rayEye.dir.y = light.pos.y -intersectionPoint.y; rayEye.dir.z = light.pos.z -intersectionPoint.z ; Normalize(rayEye.dir); if(lightIn != false) { diffuse.x = DIFFUSE*dif*RGB_R; diffuse.y = DIFFUSE*dif*RGB_G; diffuse.z = DIFFUSE*dif*RGB_B; } else if(traverse(rayEye, gpu_insideNode, gpu_vertice, gpu_face,gpu_index, gpu_leafNode, normal, min_dis) == false)//shadows and occluused { diffuse.x = DIFFUSE*dif*RGB_R; diffuse.y = DIFFUSE*dif*RGB_G; diffuse.z = DIFFUSE*dif*RGB_B; } } } color.x = diffuse.x + ambient.x + specular.x; color.y = diffuse.y + ambient.y + specular.y; color.z = diffuse.z + ambient.z + specular.z; if(color.x >1.0) color.x =1.0; if(color.y >1.0) color.y =1.0; if(color.z >1.0) color.z =1.0; gpu_color[2+3*index] =(unsigned char)(color.x*255); gpu_color[1+3*index] =(unsigned char)(color.y*255); gpu_color[0+3*index] =(unsigned char)(color.z*255); } } __global__ void RenderKDTree(bool lightIn, int numFace, int numNode, int gpu_width, int gpu_height, float* parameter, float* gpu_vertice, unsigned int* gpu_face, int* gpu_index, KDInsideNode* gpu_insideNode, KDLeafNode* gpu_leafNode,unsigned char* gpu_color, float3* gpu_sec_pos, float3* gpu_sec_normal, int* gpu_sec_index) { //parameter [0-8]: rotation matrix, [9-17]: camera_location, direction, lookat, [18-23] lightPos light col, may be more lights int index = blockIdx.x*blockDim.x + threadIdx.x; if(index<gpu_width*gpu_height) { gpu_sec_index[index] = -1; gpu_sec_pos[index].x = MAX_FLOAT; gpu_sec_pos[index].y = MAX_FLOAT; gpu_sec_pos[index].z = MAX_FLOAT; gpu_sec_normal[index].x = MAX_FLOAT; gpu_sec_normal[index].y = MAX_FLOAT; gpu_sec_normal[index].z = MAX_FLOAT; Point diffuse; Point ambient; Point specular; Point color; Point intersectionPoint; Point normal; float dif; float cos_data; Point reflect; Point rayFromEye; Light light; light.pos.x = parameter[18]; light.pos.y = parameter[19]; light.pos.z = parameter[20]; light.col.x = parameter[21]; light.col.y = parameter[22]; light.col.z = parameter[23]; Ray ray; ray.pos.x = parameter[9]; ray.pos.y = parameter[10]; ray.pos.z = parameter[11]; float id_x, id_y; unsigned int _width, _height; _height = index/gpu_width; _width = index%gpu_width; id_x = (float)_width/gpu_height - 0.5f*(float)gpu_width/gpu_height; id_y = (float)_height/gpu_height -0.5f; ambient.x = id_x; ambient.y = -id_y; ambient.z = -1.0f; Normalize(ambient); ray.dir.x = ambient.x*parameter[0] + ambient.y*parameter[1] + ambient.z*parameter[2]; ray.dir.y = ambient.x*parameter[3] + ambient.y*parameter[4] + ambient.z*parameter[5]; ray.dir.z = ambient.x*parameter[6] + ambient.y*parameter[7] + ambient.z*parameter[8]; Normalize(ray.dir); ////////////////////////////////// //traverse this tree get the intersection point and normal ambient.x = ambient.y = ambient.z =0.0; diffuse.x = diffuse.y = diffuse.z =0.0; specular.x = specular.y = specular.z =0.0; float min_dis = MAX_FLOAT; if(traverse(ray, gpu_insideNode, gpu_vertice, gpu_face,gpu_index, gpu_leafNode, normal, min_dis)) { intersectionPoint.x = ray.pos.x + min_dis*ray.dir.x; intersectionPoint.y = ray.pos.y + min_dis*ray.dir.y; intersectionPoint.z = ray.pos.z + min_dis*ray.dir.z; gpu_sec_index[index] = index; gpu_sec_pos[index].x = intersectionPoint.x; gpu_sec_pos[index].y = intersectionPoint.y; gpu_sec_pos[index].z = intersectionPoint.z; gpu_sec_normal[index].x = normal.x; 
gpu_sec_normal[index].y = normal.y; gpu_sec_normal[index].z = normal.z; ambient.x = AMBIENT*RGB_R; ambient.y = AMBIENT*RGB_G; ambient.z = AMBIENT*RGB_B; Point p; p.x = light.pos.x -intersectionPoint.x; p.y = light.pos.y -intersectionPoint.y; p.z = light.pos.z -intersectionPoint.z; Normalize(p); dif =Dot(p,normal); if(dif>0) { Ray rayEye; rayEye.pos.x = intersectionPoint.x; rayEye.pos.y = intersectionPoint.y; rayEye.pos.z = intersectionPoint.z; rayEye.dir.x = light.pos.x -intersectionPoint.x; rayEye.dir.y = light.pos.y -intersectionPoint.y; rayEye.dir.z = light.pos.z -intersectionPoint.z ; Normalize(rayEye.dir); //specular light calculation reflect.x = normal.x*2*dif-p.x; reflect.y = normal.y*2*dif-p.y; reflect.z = normal.z*2*dif-p.z; Normalize(reflect); rayFromEye.x = ray.pos.x - intersectionPoint.x; rayFromEye.y = ray.pos.y - intersectionPoint.y; rayFromEye.z = ray.pos.z - intersectionPoint.z; Normalize(rayFromEye); cos_data = Dot(reflect, rayFromEye); if(lightIn != false)//shadows and occluused { diffuse.x = DIFFUSE*dif*RGB_R; diffuse.y = DIFFUSE*dif*RGB_G; diffuse.z = DIFFUSE*dif*RGB_B; if(cos_data>0) { cos_data = powf(cos_data,PHON); specular.x = light.col.x*cos_data*SPECULAR; specular.y = light.col.y*cos_data*SPECULAR; specular.z = light.col.z*cos_data*SPECULAR; } } else if(traverse(rayEye, gpu_insideNode, gpu_vertice, gpu_face,gpu_index, gpu_leafNode, normal, min_dis) == false) { diffuse.x = DIFFUSE*dif*RGB_R; diffuse.y = DIFFUSE*dif*RGB_G; diffuse.z = DIFFUSE*dif*RGB_B; if(cos_data>0) { cos_data = powf(cos_data,PHON); specular.x = light.col.x*cos_data*SPECULAR; specular.y = light.col.y*cos_data*SPECULAR; specular.z = light.col.z*cos_data*SPECULAR; } } } } color.x = diffuse.x + ambient.x + specular.x; color.y = diffuse.y + ambient.y + specular.y; color.z = diffuse.z + ambient.z + specular.z; if(color.x >1.0) color.x =1.0; if(color.y >1.0) color.y =1.0; if(color.z >1.0) color.z =1.0; gpu_color[2+3*index] =(unsigned char)(color.x*255); gpu_color[1+3*index] =(unsigned char)(color.y*255); gpu_color[0+3*index] =(unsigned char)(color.z*255); } } extern "C" void RayCasting(float* matrix, GLubyte* pixelData) { cudaMemcpy(gpu_parameter, matrix, sizeof(float)*PARAMETER_SIZE, cudaMemcpyHostToDevice); int blockSize = d_blockSize; int blocks = d_width*d_height/blockSize; if( (d_width*d_height)%blockSize !=0) blocks++; RenderKDTree<<<blocks,blockSize>>>(d_lightIn, d_numFace, d_numVertex, d_width, d_height, gpu_parameter,gpu_vertice, gpu_face, gpu_leafNodeIndex, gpu_insideNode, gpu_leafNode,gpu_color, gpu_sec_pos, gpu_sec_normal, gpu_sec_index); //cutilDeviceSynchronize(); cudaDeviceSynchronize(); cudaMemcpy(pixelData, gpu_color, sizeof( GLubyte )*(d_width*d_height*3), cudaMemcpyDeviceToHost); } extern "C" void RayTracing(float* matrix, GLubyte* pixelData) { cudaMemcpy(gpu_parameter, matrix, sizeof(float)*PARAMETER_SIZE, cudaMemcpyHostToDevice); int blockSize = d_blockSize; int blocks = d_width*d_height/blockSize; if( (d_width*d_height)%blockSize !=0) blocks++; RenderTracer<<<blocks,blockSize>>>(d_lightIn, d_numFace, d_numVertex, d_width, d_height, gpu_parameter,gpu_vertice, gpu_face, gpu_leafNodeIndex, gpu_insideNode, gpu_leafNode,gpu_color, gpu_sec_pos, gpu_sec_normal, gpu_sec_index); //cutilDeviceSynchronize(); cudaDeviceSynchronize(); //compact the secondary ray thrust::device_ptr<int> index_ptr(gpu_sec_index); thrust::device_ptr<int> indexDouble_ptr(gpu_sec_indexDouble); cudaMemcpy(gpu_sec_indexDouble, gpu_sec_index,sizeof(int)*d_width*d_height,cudaMemcpyDeviceToDevice); 
thrust::device_ptr<float3> pos_ptr(gpu_sec_pos); thrust::device_ptr<float3> posDou_ptr(gpu_sec_posDouble); thrust::device_ptr<float3> normal_ptr(gpu_sec_normal); thrust::device_ptr<float3> normalDou_ptr(gpu_sec_normalDouble); thrust::copy_if(pos_ptr, pos_ptr+d_width*d_height,index_ptr, posDou_ptr, is_filled()); thrust::copy_if(normal_ptr, normal_ptr+d_width*d_height,index_ptr, normalDou_ptr, is_filled()); d_numActiveRay = thrust::count(index_ptr, index_ptr+d_width*d_height, -1); d_numActiveRay = d_width*d_height - d_numActiveRay; printf("result:%d\n",d_numActiveRay); thrust::remove_copy(indexDouble_ptr, indexDouble_ptr+d_width*d_height, index_ptr, -1); blocks = (d_numActiveRay*d_sampleSize)/blockSize; if( (d_numActiveRay*d_sampleSize)%blockSize != 0) blocks++; printf("blocks:%d, threads:%d\n", blocks, d_numActiveRay*d_sampleSize); cudaDeviceSynchronize(); //cutilDeviceSynchronize(); SecondRender<<<blocks, blockSize>>>(gpu_rand, d_numActiveRay, d_sampleSize, d_width, d_height ,gpu_parameter, gpu_vertice, gpu_face, gpu_leafNodeIndex, gpu_insideNode, gpu_leafNode, gpu_sec_posDouble, gpu_sec_normalDouble, gpu_sec_index, gpu_Scolor); //cutilDeviceSynchronize(); cudaDeviceSynchronize(); blocks = (d_numActiveRay)/blockSize; if( (d_numActiveRay)%blockSize != 0) blocks++; GenerateSecondImage<<<blocks, blockSize>>>(d_secondary, d_numActiveRay,gpu_sec_index, gpu_color, gpu_Scolor,d_sampleSize); cudaDeviceSynchronize(); //cutilDeviceSynchronize(); cudaMemcpy(pixelData, gpu_color, sizeof( GLubyte )*(d_width*d_height*3), cudaMemcpyDeviceToHost); //cutilDeviceSynchronize(); cudaDeviceSynchronize(); } extern "C" void Prepare_Data(bool secondary, bool lightIn, int blockSize, int sampleSize, int width, int height, GLMmodel* glm_model, KDTree& tree) { d_numFace = glm_model->numtriangles; d_numVertex = glm_model->numvertices; d_width = width; d_height = height; d_sampleSize = sampleSize; d_blockSize = blockSize; d_lightIn = lightIn; d_secondary = secondary; cudaMalloc((void**)&gpu_rand, sizeof(float)*2*d_width*d_height*d_sampleSize); cudaMalloc((void**)&gpu_Scolor, sizeof(GLubyte)*3*d_width*d_height*d_sampleSize); cudaMalloc((void**)&gpu_parameter, sizeof(float)*PARAMETER_SIZE); cudaMalloc((void**)&gpu_color, sizeof( GLubyte )*(d_width*d_height*3)); cudaMalloc((void**)&gpu_face, sizeof( unsigned int )*(glm_model->numtriangles)*3); cudaMalloc((void**)&gpu_vertice, sizeof( float )*(glm_model->numvertices+1)*3); cudaMalloc((void**)&gpu_leafNode, sizeof(KDLeafNode)*(tree.numLeafNode)); cudaMalloc((void**)&gpu_insideNode, sizeof(KDInsideNode)*(tree.numInsideNode)); cudaMalloc((void**)&gpu_leafNodeIndex, sizeof(int)*(tree.SizeofLeafNode)); cudaMalloc((void**)&gpu_sec_pos, sizeof(float3)*(d_width*d_height)); cudaMalloc((void**)&gpu_sec_posDouble, sizeof(float3)*(d_width*d_height)); cudaMalloc((void**)&gpu_sec_normal, sizeof(float3)*(d_width*d_height)); cudaMalloc((void**)&gpu_sec_normalDouble, sizeof(float3)*(d_width*d_height)); cudaMalloc((void**)&gpu_sec_index, sizeof(int)*(d_width*d_height)); cudaMalloc((void**)&gpu_sec_indexDouble, sizeof(int)*(d_width*d_height)); cudaMemcpy(gpu_face, glm_model->vindices, sizeof( unsigned int )*(glm_model->numtriangles)*3, cudaMemcpyHostToDevice); cudaMemcpy(gpu_vertice, glm_model->vertices, sizeof( float )*(glm_model->numvertices+1)*3, cudaMemcpyHostToDevice); cudaMemcpy(gpu_leafNode, tree.leafNode, sizeof(KDLeafNode)*(tree.numLeafNode), cudaMemcpyHostToDevice); cudaMemcpy(gpu_insideNode, tree.insideNode, sizeof(KDInsideNode)*(tree.numInsideNode), cudaMemcpyHostToDevice); 
cudaMemcpy(gpu_leafNodeIndex, tree.leafNodeIndex, sizeof(int)*(tree.SizeofLeafNode), cudaMemcpyHostToDevice); float* tmp_random; tmp_random = (float*)malloc(sizeof(float)*2*d_width*d_height*d_sampleSize); for(int i = 0; i<d_width*d_height*d_sampleSize*2; i++) tmp_random[i] = ((float)rand()/RAND_MAX); cudaMemcpy(gpu_rand, tmp_random, sizeof(float)*2*d_width*d_height*d_sampleSize, cudaMemcpyHostToDevice); free(tmp_random); } extern "C" void Clean_Data() { cudaFree(gpu_parameter); cudaFree(gpu_vertice); cudaFree(gpu_face); cudaFree(gpu_leafNodeIndex); cudaFree(gpu_insideNode); cudaFree(gpu_leafNode); cudaFree(gpu_color); cudaFree(gpu_sec_pos); cudaFree(gpu_sec_posDouble); cudaFree(gpu_sec_normal); cudaFree(gpu_sec_normalDouble); cudaFree(gpu_sec_index); cudaFree(gpu_sec_indexDouble); cudaFree(gpu_Scolor); cudaFree(gpu_rand); }
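The RayTracing host function in the pair above compacts the secondary-ray buffers with Thrust, using gpu_sec_index as a stencil and the is_filled predicate to drop rays that hit nothing (marked -1). The standalone sketch below isolates that copy_if-with-stencil pattern; the toy arrays, sizes, and main function are illustrative assumptions and are not taken from either file.

#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <cstdio>

// Predicate mirroring the is_filled functor used above: an index of -1 marks an empty slot.
struct is_filled
{
    __host__ __device__ bool operator()(const int x) { return x != -1; }
};

int main()
{
    // Hypothetical data: -1 entries correspond to rays with no intersection.
    int   h_index[6] = {0, -1, 2, -1, 4, 5};
    float h_pos[6]   = {0.f, 1.f, 2.f, 3.f, 4.f, 5.f};

    thrust::device_vector<int>   d_index(h_index, h_index + 6);
    thrust::device_vector<float> d_pos(h_pos, h_pos + 6);
    thrust::device_vector<float> d_pos_compact(6);

    // Keep d_pos[i] only where the stencil d_index[i] passes the predicate.
    thrust::device_vector<float>::iterator end =
        thrust::copy_if(d_pos.begin(), d_pos.end(), d_index.begin(),
                        d_pos_compact.begin(), is_filled());

    int num_active = static_cast<int>(end - d_pos_compact.begin());
    printf("active rays: %d\n", num_active); // expected: 4
    return 0;
}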
8b86406bca39bf7cfba0edc80bb883815d01ce3c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <iostream> using namespace std; __global__ void calcEuropeanOption(int timeSteps, double startPrice, double strikePrice, double riskFree, double delta, double u, double p_u, double * cache) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i > timeSteps) return; int colDim = timeSteps + 1; cache[timeSteps * colDim + i] = max(startPrice * pow(u, 2 * i - timeSteps) - strikePrice, 0.0); timeSteps--; while (timeSteps >= i) { cache[timeSteps * colDim + i] = (p_u * cache[(timeSteps + 1) * colDim + i + 1] + (1 - p_u) * cache[(timeSteps + 1) * colDim + i ]) * exp(-riskFree * delta); timeSteps--; __syncthreads(); } } int main() { double startPrice = 100; double strikePrice = 100; double timeToExpiry = 1.5; double vol = 0.12; double riskFree = 0.005; int timeSteps = 100; double delta = timeToExpiry / timeSteps; double u = exp(vol * sqrt(delta)); double p_u = (exp(riskFree * delta) - 1/u) / (u - 1/u); int N = timeSteps + 1; double * cache = new double[N * N]; double * d_cache; hipMalloc(&d_cache, N * N * sizeof(double)); hipLaunchKernelGGL(( calcEuropeanOption), dim3((timeSteps + 255)/256), dim3(256), 0, 0, timeSteps, startPrice,strikePrice, riskFree, delta, u, p_u, d_cache); double * finalPrice; hipMemcpy(finalPrice, d_cache, sizeof(double), hipMemcpyDeviceToHost); cout << "Price: " << *finalPrice << endl; hipFree(d_cache); }
8b86406bca39bf7cfba0edc80bb883815d01ce3c.cu
#include <algorithm> #include <iostream> using namespace std; __global__ void calcEuropeanOption(int timeSteps, double startPrice, double strikePrice, double riskFree, double delta, double u, double p_u, double * cache) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i > timeSteps) return; int colDim = timeSteps + 1; cache[timeSteps * colDim + i] = max(startPrice * pow(u, 2 * i - timeSteps) - strikePrice, 0.0); timeSteps--; while (timeSteps >= i) { cache[timeSteps * colDim + i] = (p_u * cache[(timeSteps + 1) * colDim + i + 1] + (1 - p_u) * cache[(timeSteps + 1) * colDim + i ]) * exp(-riskFree * delta); timeSteps--; __syncthreads(); } } int main() { double startPrice = 100; double strikePrice = 100; double timeToExpiry = 1.5; double vol = 0.12; double riskFree = 0.005; int timeSteps = 100; double delta = timeToExpiry / timeSteps; double u = exp(vol * sqrt(delta)); double p_u = (exp(riskFree * delta) - 1/u) / (u - 1/u); int N = timeSteps + 1; double * cache = new double[N * N]; double * d_cache; cudaMalloc(&d_cache, N * N * sizeof(double)); calcEuropeanOption<<<(timeSteps + 255)/256, 256>>>(timeSteps, startPrice,strikePrice, riskFree, delta, u, p_u, d_cache); double finalPrice; cudaMemcpy(&finalPrice, d_cache, sizeof(double), cudaMemcpyDeviceToHost); cout << "Price: " << finalPrice << endl; cudaFree(d_cache); delete[] cache; }
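Reading the hip and cuda versions of this pair side by side shows the two purely mechanical changes hipify makes at the launch site: the runtime calls are renamed (cudaMalloc/cudaMemcpy/cudaFree become hipMalloc/hipMemcpy/hipFree) and the triple-chevron launch becomes hipLaunchKernelGGL, which takes the grid and block as dim3 values plus two extra arguments, the dynamic shared-memory size and the stream, both 0 here because the CUDA launch left them at their defaults. The doubled parentheses around the kernel name in the hip file appear to be a harmless artifact of that textual rewrite. Below is a minimal HIP sketch of the same launch pattern; the scale kernel and its arguments are hypothetical and not taken from either file.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale(double *v, double factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) v[i] *= factor;
}

int main()
{
    const int n = 256;
    double *d_v;
    hipMalloc(&d_v, n * sizeof(double));   // was: cudaMalloc(&d_v, ...)
    hipMemset(d_v, 0, n * sizeof(double));
    // was: scale<<<(n + 255) / 256, 256>>>(d_v, 2.0, n);
    hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, 0, d_v, 2.0, n);
    hipDeviceSynchronize();                // was: cudaDeviceSynchronize()
    hipFree(d_v);                          // was: cudaFree(d_v)
    printf("done\n");
    return 0;
}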
de63d6f86613674d112394f9ed5c903930819f26.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../../helpers/csv.h" #include "../../helpers/cuda/reduce.h" typedef struct { float *x; float *y; int count; } Points; Points load_data(char *file_name, int count) { std::ifstream file(file_name); Points result = { (float *) malloc(sizeof(float) * count), (float *) malloc(sizeof(float) * count), count }; CSVRow row; for (int i = 0; i < result.count; i++) { file >> row; result.x[i] = atof(row[0].c_str()); result.y[i] = atof(row[1].c_str()); } return result; } __global__ void calculate_error_kernel( const float b, const float m, float const *d_x, float const *d_y, float *d_output, const int size ) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < size) { // (y - (m*x + b)) ** 2 float error = d_y[index] - (m * d_x[index] + b); d_output[index] = pow(error, 2); } } float calculate_error( const float b, const float m, float const *d_x, float const *d_y, const int size ) { const int MAX_THREADS = 1024; const int THREAD_COUNT = min(MAX_THREADS, size); const int BLOCK_COUNT = ceil((double) size / (double) THREAD_COUNT); float *d_errors; hipMalloc((void **) &d_errors, sizeof(float) * size); // Calclulate the errors for all (x, y) pairs hipLaunchKernelGGL(( calculate_error_kernel), dim3(BLOCK_COUNT), dim3(THREAD_COUNT), 0, 0, b, m, d_x, d_y, d_errors, size); // Calculate the total error float *d_error_sum; hipMalloc((void **) &d_error_sum, sizeof(float)); primitive_reduce_add(d_errors, d_error_sum, size); // Copy the error sum to the host float h_error_sum[1]; hipMemcpy(h_error_sum, d_error_sum, sizeof(float), hipMemcpyDeviceToHost); hipFree(d_error_sum); hipFree(d_errors); // Return mean error return h_error_sum[0] / (float) size; } __global__ void gradient_step_kernel( float const *d_b, float const *d_m, float const *d_x, float const *d_y, float *d_b_gradients, float *d_m_gradients, const int size ) { } void gradient_step( float *d_b, float *d_m, float const *d_x, float const *d_y, const int size, const float learning_rate ) { const int MAX_THREADS = 1024; const int THREAD_COUNT = min(MAX_THREADS, size); const int BLOCK_COUNT = ceil((double) size / (double) THREAD_COUNT); // Allocate memory on the GPU for the new gradients for each (x, y) pair float *d_b_gradients, *d_m_gradients; hipMalloc((void **) &d_b_gradients, sizeof(float) * size); hipMalloc((void **) &d_m_gradients, sizeof(float) * size); hipLaunchKernelGGL(( gradient_step_kernel), dim3(BLOCK_COUNT), dim3(THREAD_COUNT), 0, 0, d_b, d_m, d_x, d_y, d_b_gradients, d_m_gradients, size); } void gradient_descent( float *h_b, float *h_m, float const *d_x, float const *d_y, const int size, const int number_of_iterations, const float learning_rate ) { // Copy b and m to GPU float *d_b, *d_m; hipMalloc((void**) &d_b, sizeof(float)); hipMemcpy(d_b, h_b, sizeof(float), hipMemcpyHostToDevice); hipMalloc((void**) &d_m, sizeof(float)); hipMemcpy(d_m, h_m, sizeof(float), hipMemcpyHostToDevice); // Perform the gradient steps for (int i = 0; i < number_of_iterations; i++) { gradient_step(d_b, d_m, d_x, d_y, size, learning_rate); } // Copy the new values for b and m back to the host hipMemcpy(h_b, d_b, sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(h_m, d_m, sizeof(float), hipMemcpyDeviceToHost); hipFree(d_b); hipFree(d_m); } int main(int argc, char **argv) { // Load the data Points points = load_data(argv[1], atoi(argv[2])); // Copy the data to the GPU float *d_x, *d_y; hipMalloc((void **) &d_x, sizeof(float) * points.count); hipMemcpy(d_x, 
points.x, sizeof(float) * points.count, hipMemcpyHostToDevice); hipMalloc((void **) &d_y, sizeof(float) * points.count); hipMemcpy(d_y, points.y, sizeof(float) * points.count, hipMemcpyHostToDevice); // Define hyperparameters float learning_rate = 0.0001; // y = mx + b float initial_b[1] = {0}; float initial_m[1] = {0}; int number_of_iterations = 1000; // Calculate the initial error float error = calculate_error(initial_b[0], initial_m[0], d_x, d_y, points.count); printf("start gradient descent at b = %f, m = %f, error = %f\n", initial_b[0], initial_m[0], error); // Perform gradient descent gradient_descent(initial_b, initial_m, d_x, d_y, points.count, number_of_iterations, learning_rate); // Calculate the new error error = calculate_error(initial_b[0], initial_m[0], d_x, d_y, points.count); printf("end point at b = %f, m = %f, error = %f\n", initial_b[0], initial_m[0], error); hipFree(d_x); hipFree(d_y); free(points.x); free(points.y); }
de63d6f86613674d112394f9ed5c903930819f26.cu
#include "../../helpers/csv.h" #include "../../helpers/cuda/reduce.h" typedef struct { float *x; float *y; int count; } Points; Points load_data(char *file_name, int count) { std::ifstream file(file_name); Points result = { (float *) malloc(sizeof(float) * count), (float *) malloc(sizeof(float) * count), count }; CSVRow row; for (int i = 0; i < result.count; i++) { file >> row; result.x[i] = atof(row[0].c_str()); result.y[i] = atof(row[1].c_str()); } return result; } __global__ void calculate_error_kernel( const float b, const float m, float const *d_x, float const *d_y, float *d_output, const int size ) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < size) { // (y - (m*x + b)) ** 2 float error = d_y[index] - (m * d_x[index] + b); d_output[index] = pow(error, 2); } } float calculate_error( const float b, const float m, float const *d_x, float const *d_y, const int size ) { const int MAX_THREADS = 1024; const int THREAD_COUNT = min(MAX_THREADS, size); const int BLOCK_COUNT = ceil((double) size / (double) THREAD_COUNT); float *d_errors; cudaMalloc((void **) &d_errors, sizeof(float) * size); // Calclulate the errors for all (x, y) pairs calculate_error_kernel<<<BLOCK_COUNT, THREAD_COUNT>>>(b, m, d_x, d_y, d_errors, size); // Calculate the total error float *d_error_sum; cudaMalloc((void **) &d_error_sum, sizeof(float)); primitive_reduce_add(d_errors, d_error_sum, size); // Copy the error sum to the host float h_error_sum[1]; cudaMemcpy(h_error_sum, d_error_sum, sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_error_sum); cudaFree(d_errors); // Return mean error return h_error_sum[0] / (float) size; } __global__ void gradient_step_kernel( float const *d_b, float const *d_m, float const *d_x, float const *d_y, float *d_b_gradients, float *d_m_gradients, const int size ) { } void gradient_step( float *d_b, float *d_m, float const *d_x, float const *d_y, const int size, const float learning_rate ) { const int MAX_THREADS = 1024; const int THREAD_COUNT = min(MAX_THREADS, size); const int BLOCK_COUNT = ceil((double) size / (double) THREAD_COUNT); // Allocate memory on the GPU for the new gradients for each (x, y) pair float *d_b_gradients, *d_m_gradients; cudaMalloc((void **) &d_b_gradients, sizeof(float) * size); cudaMalloc((void **) &d_m_gradients, sizeof(float) * size); gradient_step_kernel<<<BLOCK_COUNT, THREAD_COUNT>>>(d_b, d_m, d_x, d_y, d_b_gradients, d_m_gradients, size); } void gradient_descent( float *h_b, float *h_m, float const *d_x, float const *d_y, const int size, const int number_of_iterations, const float learning_rate ) { // Copy b and m to GPU float *d_b, *d_m; cudaMalloc((void**) &d_b, sizeof(float)); cudaMemcpy(d_b, h_b, sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**) &d_m, sizeof(float)); cudaMemcpy(d_m, h_m, sizeof(float), cudaMemcpyHostToDevice); // Perform the gradient steps for (int i = 0; i < number_of_iterations; i++) { gradient_step(d_b, d_m, d_x, d_y, size, learning_rate); } // Copy the new values for b and m back to the host cudaMemcpy(h_b, d_b, sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(h_m, d_m, sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_b); cudaFree(d_m); } int main(int argc, char **argv) { // Load the data Points points = load_data(argv[1], atoi(argv[2])); // Copy the data to the GPU float *d_x, *d_y; cudaMalloc((void **) &d_x, sizeof(float) * points.count); cudaMemcpy(d_x, points.x, sizeof(float) * points.count, cudaMemcpyHostToDevice); cudaMalloc((void **) &d_y, sizeof(float) * points.count); cudaMemcpy(d_y, 
points.y, sizeof(float) * points.count, cudaMemcpyHostToDevice); // Define hyperparameters float learning_rate = 0.0001; // y = mx + b float initial_b[1] = {0}; float initial_m[1] = {0}; int number_of_iterations = 1000; // Calculate the initial error float error = calculate_error(initial_b[0], initial_m[0], d_x, d_y, points.count); printf("start gradient descent at b = %f, m = %f, error = %f\n", initial_b[0], initial_m[0], error); // Perform gradient descent gradient_descent(initial_b, initial_m, d_x, d_y, points.count, number_of_iterations, learning_rate); // Calculate the new error error = calculate_error(initial_b[0], initial_m[0], d_x, d_y, points.count); printf("end point at b = %f, m = %f, error = %f\n", initial_b[0], initial_m[0], error); cudaFree(d_x); cudaFree(d_y); free(points.x); free(points.y); }
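In both versions of the file above, gradient_step_kernel is declared with a full parameter list but an empty body, and gradient_step allocates d_b_gradients and d_m_gradients without ever reducing them or updating b and m, so the descent loop currently has no effect on the fit. A possible body for the kernel, mirroring its existing signature and the squared-error objective used in calculate_error, is sketched below; the gradient formulas and the suggested follow-up reduction are assumptions, not the original author's implementation.

// Sketch only: per-point partial derivatives of the mean squared error for y = m*x + b.
__global__ void gradient_step_kernel(
    float const *d_b, float const *d_m,
    float const *d_x, float const *d_y,
    float *d_b_gradients, float *d_m_gradients,
    const int size)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < size) {
        // error_i = y_i - (m * x_i + b)
        float error = d_y[index] - (d_m[0] * d_x[index] + d_b[0]);
        // d/db of (1/N) * sum(error^2) contributes -2 * error / N per point,
        // d/dm contributes -2 * x_i * error / N per point.
        d_b_gradients[index] = -2.0f * error / (float) size;
        d_m_gradients[index] = -2.0f * d_x[index] * error / (float) size;
    }
}

gradient_step would then need to sum the two gradient arrays (for example with the primitive_reduce_add helper already used by calculate_error), update the parameters as b -= learning_rate * grad_b and m -= learning_rate * grad_m, and free the temporary gradient buffers on each iteration.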
bc09efc7dc6d94edf5785a80206807fed29bd48c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <aggregation/aggregation_amg_level.h> #include <profile.h> #include <matrix_analysis.h> #ifdef _WIN32 #pragma warning (push) #pragma warning (disable : 4244 4267 4521) #endif #ifdef _WIN32 #pragma warning (pop) #endif #include <basic_types.h> #include <util.h> #include <fstream> #include <cutil.h> #include <multiply.h> #include <transpose.h> #include <blas.h> #include <string> #include <string.h> #include <iostream> #include <algorithm> #include <amgx_timer.h> #include <amgx_types/util.h> #include <thrust/sort.h> #include <thrust/remove.h> #include <thrust/transform.h> #include <thrust/binary_search.h> #include <thrust/unique.h> #include <thrust/inner_product.h> namespace amgx { namespace aggregation { // ---------------------- // Kernels // ---------------------- template <typename IndexType, typename ValueType> __global__ void set_to_one_kernel(IndexType start, IndexType end, IndexType *ind, ValueType *v) { for (int tid = start + blockDim.x * blockIdx.x + threadIdx.x; tid < end; tid += gridDim.x * blockDim.x) { v[ind[tid]] = types::util<ValueType>::get_one(); } } template <typename IndexType> __global__ void renumberAggregatesKernel(const IndexType *renumbering, const int interior_offset, const int bdy_offset, IndexType *aggregates, const int num_aggregates, const int n_interior, const int renumbering_size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; while (tid < num_aggregates) { IndexType new_agg_id; if (renumbering_size == 0) { new_agg_id = aggregates[tid]; } else { new_agg_id = renumbering[aggregates[tid]]; } //if (aggregates[tid] > num_aggregates) //{ //printf("ID %d old %d + %d = %d\n", tid, new_agg_id, ((new_agg_id >= n_interior) ? bdy_offset : interior_offset), new_agg_id + ((new_agg_id >= n_interior) ? bdy_offset : interior_offset)); //} new_agg_id += ((new_agg_id >= n_interior) ? 
bdy_offset : interior_offset); aggregates[tid] = new_agg_id; tid += gridDim.x * blockDim.x; } } // Kernel to restrict residual using csr_format template <typename IndexType, typename ValueType> __global__ void restrictResidualKernel(const IndexType *row_offsets, const IndexType *column_indices, const ValueType *r, ValueType *rr, const int num_aggregates) { int jmin, jmax; for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_aggregates; tid += gridDim.x * blockDim.x) { ValueType temp(types::util<ValueType>::get_zero()); jmin = row_offsets[tid]; jmax = row_offsets[tid + 1]; for (int j = jmin; j < jmax; j++) { int j_col = column_indices[j]; temp = temp + r[j_col]; } rr[tid] = temp; } } // Kernel to restrict residual using block_dia_csr_format template <typename IndexType, typename ValueType, int bsize> __global__ void restrictResidualBlockDiaCsrKernel(const IndexType *row_offsets, const IndexType *column_indices, const ValueType *r, ValueType *rr, const int num_aggregates) { ValueType rr_temp[bsize]; int offset, jmin, jmax; for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_aggregates; tid += gridDim.x * blockDim.x) { // Initialize to zero #pragma unroll for (int m = 0; m < bsize; m++) { rr_temp[m] = types::util<ValueType>::get_zero(); } jmin = row_offsets[tid]; jmax = row_offsets[tid + 1]; for (int j = jmin; j < jmax; j++) { int jcol = column_indices[j]; offset = jcol * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { rr_temp[m] = rr_temp[m] + r[offset + m]; } } offset = tid * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { rr[offset + m] = rr_temp[m]; }; } } // Kernel to prolongate and apply the correction for csr format template <typename IndexType, typename ValueType> __global__ void prolongateAndApplyCorrectionKernel(const ValueType alpha, const int num_rows, ValueType *x, const ValueType *e, const IndexType *aggregates, IndexType num_aggregates) { for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_rows; tid += gridDim.x * blockDim.x) { IndexType I = aggregates[tid]; x[tid] = x[tid] + alpha * e[I]; } } // Kernel to prolongate and apply the correction for block-dia-csr format template <typename IndexType, typename ValueType> __global__ void prolongateAndApplyCorrectionBlockDiaCsrKernel(const ValueType alpha, const int num_block_rows, ValueType *x, const ValueType *e, const IndexType *aggregates, IndexType num_aggregates, const int bsize) { for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_block_rows; tid += gridDim.x * blockDim.x) { IndexType I = aggregates[tid]; for (int m = 0; m < bsize; m++) { x[tid * bsize + m] = x[tid * bsize + m] + alpha * e[I * bsize + m]; } } } template <typename IndexType, typename ValueType> __global__ void prolongateVector(const IndexType *aggregates, const ValueType *in, ValueType *out, IndexType fine_rows, IndexType coarse_rows, int blocksize) { int tid = threadIdx.x + blockDim.x * blockIdx.x; while ( tid < fine_rows * blocksize ) { int i = tid / blocksize; int e = tid % blocksize; IndexType I = aggregates[i]; out[tid] = in[ I * blocksize + e ]; tid += gridDim.x * blockDim.x; } } template <typename IndexType, typename ValueType> __global__ void applyCorrection(ValueType lambda, const ValueType *e, ValueType *x, IndexType numRows ) { int tid = threadIdx.x + blockDim.x * blockIdx.x; while ( tid < numRows ) { x[tid] = x[tid] + lambda * e[tid]; tid += gridDim.x * blockDim.x; } } // ------------------------------- // Methods // ------------------------------ // Constructor template <class T_Config> 
Aggregation_AMG_Level_Base<T_Config>::Aggregation_AMG_Level_Base(AMG_Class *amg, ThreadManager *tmng) : AMG_Level<T_Config>(amg, tmng) { m_selector = SelectorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); m_coarseAGenerator = CoarseAGeneratorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); m_matrix_halo_exchange = amg->m_cfg->AMG_Config::getParameter<int>("matrix_halo_exchange", amg->m_cfg_scope); m_print_aggregation_info = amg->m_cfg->AMG_Config::getParameter<int>("print_aggregation_info", amg->m_cfg_scope) != 0; m_error_scaling = amg->m_cfg->AMG_Config::getParameter<int>("error_scaling", amg->m_cfg_scope ); reuse_scale = amg->m_cfg->AMG_Config::getParameter<int>("reuse_scale", amg->m_cfg_scope ); scaling_smoother_steps = amg->m_cfg->AMG_Config::getParameter<int>("scaling_smoother_steps", amg->m_cfg_scope ); scale_counter = 0; } // Destructor template <class T_Config> Aggregation_AMG_Level_Base<T_Config>::~Aggregation_AMG_Level_Base() { delete m_selector; delete m_coarseAGenerator; } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::transfer_level(AMG_Level<TConfig1> *ref_lvl) { Aggregation_AMG_Level_Base<TConfig1> *ref_agg_lvl = dynamic_cast<Aggregation_AMG_Level_Base<TConfig1>*>(ref_lvl); this->scale_counter = ref_agg_lvl->scale_counter; this->scale = ref_agg_lvl->scale; this->m_R_row_offsets.copy(ref_agg_lvl->m_R_row_offsets); this->m_R_column_indices.copy(ref_agg_lvl->m_R_column_indices); this->m_aggregates.copy(ref_agg_lvl->m_aggregates); this->m_aggregates_fine_idx.copy(ref_agg_lvl->m_aggregates_fine_idx); this->m_num_aggregates = ref_agg_lvl->m_num_aggregates; this->m_num_all_aggregates = ref_agg_lvl->m_num_all_aggregates; } typedef std::pair<int, int> mypair; bool comparator ( const mypair &l, const mypair &r) { return l.first < r.first; } // Method to compute R // General path // TODO: this could be merged with selector to save some computations template <typename T_Config> void Aggregation_AMG_Level_Base<T_Config>::computeRestrictionOperator_common() { m_R_row_offsets.resize(m_num_all_aggregates + 1); //create one more row for the pseudo aggregate IVector R_row_indices(m_aggregates); #if AMGX_ASYNCCPU_PROOF_OF_CONCEPT bool use_cpu = m_aggregates.size() < 4096; if (use_cpu) { struct computeRestrictionTask : public task { Aggregation_AMG_Level_Base<T_Config> *self; IVector *R_row_indices; void run() { int N = self->m_aggregates.size(); IVector_h R_row_indices_host(self->m_aggregates); std::vector<mypair> pairs(N); for (int i = 0; i < N; i++) { pairs[i].first = R_row_indices_host[i]; pairs[i].second = i; } std::stable_sort(pairs.begin(), pairs.end(), comparator); IVector_h R_column_indices(self->A->get_num_rows()); for (int i = 0; i < N; i++) { R_column_indices[i] = pairs[i].second; R_row_indices_host[i] = pairs[i].first; } self->m_R_column_indices = R_column_indices; *R_row_indices = R_row_indices_host; } }; computeRestrictionTask *t = new computeRestrictionTask(); t->self = this; t->R_row_indices = &R_row_indices; t->run(); delete t; } else #endif { m_R_column_indices.resize(this->A->get_num_rows()); thrust::sequence(m_R_column_indices.begin(), m_R_column_indices.end()); cudaCheckError(); thrust::sort_by_key(R_row_indices.begin(), R_row_indices.end(), m_R_column_indices.begin()); cudaCheckError(); } thrust::lower_bound(R_row_indices.begin(), R_row_indices.end(), thrust::counting_iterator<typename IVector::value_type>(0), thrust::counting_iterator<typename IVector::value_type>(m_R_row_offsets.size()), m_R_row_offsets.begin()); 
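    // Illustrative walk-through of the construction above (a small made-up
    // example, not taken from any particular run): m_aggregates maps each fine
    // row to its aggregate id. Sorting the sequence 0..num_rows-1 with the
    // aggregate ids as keys groups the fine rows of each aggregate contiguously
    // (these become R's column indices), and the lower_bound of 0..num_aggregates
    // over the sorted keys yields the CSR row offsets of R.
    // For m_aggregates = [1, 0, 1, 2]:
    //   sorted keys        = [0, 1, 1, 2]
    //   m_R_column_indices = [1, 0, 2, 3]   (fine rows grouped by aggregate)
    //   m_R_row_offsets    = [0, 1, 3, 4]   (lower_bound of 0,1,2,3 in the keys)
    // i.e. aggregate 0 owns fine row 1, aggregate 1 owns rows 0 and 2, and
    // aggregate 2 owns row 3.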
cudaCheckError(); } // two methods below could be merged // Method to compute R on HOST using csr format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeRestrictionOperator_1x1() { this->m_R_row_offsets.resize(this->m_num_all_aggregates + 1); this->m_R_column_indices.resize(this->A->get_num_rows()); this->fillRowOffsetsAndColIndices(this->A->get_num_rows()); } // Method to compute R on HOST using block dia-csr format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeRestrictionOperator_4x4() { this->m_R_row_offsets.resize(this->m_num_all_aggregates + 1); this->m_R_column_indices.resize(this->A->get_num_rows()); this->fillRowOffsetsAndColIndices(this->A->get_num_rows()); } // Method to create R_row_offsest and R_column_indices array on HOST using csr or block dia-csr format template <typename T_Config> void Aggregation_AMG_Level_Base<T_Config>::fillRowOffsetsAndColIndices(const int R_num_cols) { for (int i = 0; i < m_num_all_aggregates + 1; i++) { m_R_row_offsets[i] = 0; } // Count number of neighbors for each row for (int i = 0; i < R_num_cols; i++) { int I = m_aggregates[i]; m_R_row_offsets[I]++; } m_R_row_offsets[m_num_all_aggregates] = R_num_cols; for (int i = m_num_all_aggregates - 1; i >= 0; i--) { m_R_row_offsets[i] = m_R_row_offsets[i + 1] - m_R_row_offsets[i]; } /* Set column indices. */ for (int i = 0; i < R_num_cols; i++) { int I = m_aggregates[i]; int Ip = m_R_row_offsets[I]++; m_R_column_indices[Ip] = i; } /* Reset r[i] to start of row memory. */ for (int i = m_num_all_aggregates - 1; i > 0; i--) { m_R_row_offsets[i] = m_R_row_offsets[i - 1]; } m_R_row_offsets[0] = 0; } // Method to compute R on DEVICE using block dia-csr format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeRestrictionOperator_4x4() { this->computeRestrictionOperator_common(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeRestrictionOperator_1x1() { this->computeRestrictionOperator_common(); } // Method to restrict Residual on host using csr_matrix format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::restrictResidual_1x1(const VVector &r, VVector &rr) { ValueTypeB temp; for (int i = 0; i < this->m_num_aggregates; i++) { temp = types::util<ValueTypeB>::get_zero(); for (int j = this->m_R_row_offsets[i]; j < this->m_R_row_offsets[i + 1]; j++) { int j_col = this->m_R_column_indices[j]; temp = temp + r[j_col]; } rr[i] = temp; } } // Method to restrict Residual on host using block_dia_csr_matrix format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::restrictResidual_4x4(const VVector &r, VVector &rr) { IndexType bsize = this->A->get_block_dimy(); ValueTypeB *temp = new ValueTypeB[bsize]; for (int i = 0; i < this->m_num_aggregates; i++) { // Initialize temp to 0 for 
(int k = 0; k < bsize; k++) { temp[k] = types::util<ValueTypeB>::get_zero(); } // Add contributions from each fine point for (int j = this->m_R_row_offsets[i]; j < this->m_R_row_offsets[i + 1]; j++) { int j_col = this->m_R_column_indices[j]; for (int k = 0; k < bsize; k++) { temp[k] = temp[k] + r[j_col * bsize + k]; } } // Store result for (int k = 0; k < bsize; k++) { rr[i * bsize + k] = temp[k]; } } } // Method to restrict Residual on device using csr_matrix format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::restrictResidual_1x1(const VVector &r, VVector &rr) { AMGX_CPU_PROFILER("Aggregation_AMG_Level::restrict_residual_1x1 "); int block_size = 64; int max_threads;; if (!this->isConsolidationLevel()) { max_threads = this->m_num_aggregates; } else { max_threads = this->m_num_all_aggregates; } int num_blocks = min( AMGX_GRID_MAX_SIZE, (max_threads - 1) / block_size + 1); const IndexType *R_row_offsets_ptr = this->m_R_row_offsets.raw(); const IndexType *R_column_indices_ptr = this->m_R_column_indices.raw(); const ValueTypeB *r_ptr = r.raw(); ValueTypeB *rr_ptr = rr.raw(); hipLaunchKernelGGL(( restrictResidualKernel) , dim3(num_blocks), dim3(block_size), 0, 0, R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads); cudaCheckError(); } // Method to restrict Residual on device using block_dia_csr_matrix format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::restrictResidual_4x4(const VVector &r, VVector &rr) { AMGX_CPU_PROFILER("Aggregation_AMG_Level::restrict_residual_4x4 "); int block_size = 64; int max_threads; if (!this->isConsolidationLevel()) { max_threads = this->m_num_aggregates; } else { max_threads = this->m_num_all_aggregates; }; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (max_threads + block_size - 1) / block_size); const IndexType *R_row_offsets_ptr = this->m_R_row_offsets.raw(); const IndexType *R_column_indices_ptr = this->m_R_column_indices.raw(); const ValueTypeB *r_ptr = r.raw(); ValueTypeB *rr_ptr = rr.raw(); cudaCheckError(); switch ( this->getA().get_block_dimy() ) { case 2: hipLaunchKernelGGL(( restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 2>) , dim3(num_blocks), dim3(block_size), 0, 0, R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads); break; case 3: hipLaunchKernelGGL(( restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 3>) , dim3(num_blocks), dim3(block_size), 0, 0, R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads); break; case 4: hipLaunchKernelGGL(( restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 4>) , dim3(num_blocks), dim3(block_size), 0, 0, R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads); break; case 5: hipLaunchKernelGGL(( restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 5>) , dim3(num_blocks), dim3(block_size), 0, 0, R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads); break; case 8: hipLaunchKernelGGL(( restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 8>) , dim3(num_blocks), dim3(block_size), 0, 0, R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads); break; case 10: hipLaunchKernelGGL(( restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 10>) , dim3(num_blocks), dim3(block_size), 0, 0, R_row_offsets_ptr, R_column_indices_ptr, 
r_ptr, rr_ptr, max_threads); break; default: FatalError( "Unsupported block size in restrictResidual_4x4!!!", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE ); } cudaCheckError(); } __inline__ float getAlpha(float &nom, float &denom) { float alpha; if (nom * denom <= 0. || std::abs(nom) < std::abs(denom)) { alpha = 1.; } else if (std::abs(nom) > 2.*std::abs(denom)) { alpha = 2.; } else { alpha = nom / denom; } return alpha; } __inline__ double getAlpha(double &nom, double &denom) { double alpha; if (nom * denom <= 0. || std::abs(nom) < std::abs(denom)) { alpha = 1.; } else if (std::abs(nom) > 2.*std::abs(denom)) { alpha = 2.; } else { alpha = nom / denom; } return alpha; } __inline__ hipComplex getAlpha(hipComplex &nom, hipComplex &denom) { hipComplex alpha; if (types::util<hipComplex>::abs(nom) < types::util<hipComplex>::abs(denom)) { alpha = make_cuComplex(1.f, 0.f); } else if (types::util<hipComplex>::abs(nom) > 2.*types::util<hipComplex>::abs(denom)) { alpha = make_cuComplex(2.f, 0.f); } else { alpha = nom / denom; } return alpha; } __inline__ hipDoubleComplex getAlpha(hipDoubleComplex &nom, hipDoubleComplex &denom) { hipDoubleComplex alpha; if (types::util<hipDoubleComplex>::abs(nom) < types::util<hipDoubleComplex>::abs(denom)) { alpha = make_cuDoubleComplex(1., 0.); } else if (types::util<hipDoubleComplex>::abs(nom) > 2.*types::util<hipDoubleComplex>::abs(denom)) { alpha = make_cuDoubleComplex(2., 0.); } else { alpha = nom / denom; } return alpha; } template< class T_Config> typename T_Config::VecPrec Aggregation_AMG_Level_Base<T_Config>::computeAlpha(const Vector<T_Config> &e, const Vector<T_Config> &bc, const Vector<T_Config> &tmp) { typename T_Config::VecPrec alpha = types::util<ValueTypeB>::get_one(); Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); int size = Ac.get_num_rows(); VVector v(2, types::util<ValueTypeB>::get_zero()); v[0] = thrust::inner_product(e.begin(), e.begin() + size, bc.begin(), types::util<ValueTypeB>::get_zero()); v[1] = thrust::inner_product(e.begin(), e.begin() + size, tmp.begin(), types::util<ValueTypeB>::get_zero()); cudaCheckError(); return getAlpha(v[0], v[1]); } // Method to prolongate the error on HOST using csr format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::prolongateAndApplyCorrection_1x1(Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &e, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &bc, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &x, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &tmp) { Matrix<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &A = this->getA(); Matrix<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &C = this->next_h->getA(); if ( this->m_error_scaling >= 2 ) { FatalError("error_scaling=2,3 is not implemented on host", AMGX_ERR_NOT_IMPLEMENTED ); } ValueTypeB alpha = types::util<ValueTypeB>::get_one(); if (this->m_error_scaling) { multiply(this->next_h->getA(), e, tmp); alpha = this->computeAlpha (e, bc, tmp); } // Apply correction on all (interior and exterior) equations. 
for (int i = 0; i < A.get_num_cols(); i++) { int I = this->m_aggregates[i]; x[i] = x[i] + alpha * e[I]; } } // Method to prolongate the error on HOST using block_dia_csr format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::prolongateAndApplyCorrection_4x4(Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &e, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &bc, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &x, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &tmp) { if (this->A->get_block_dimy() != this->A->get_block_dimx()) { FatalError("Aggregation_AMG_Level not implemented for non square blocks, exiting", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } if ( this->m_error_scaling >= 2 ) { FatalError("error_scaling=2,3 is not implemented on host", AMGX_ERR_NOT_IMPLEMENTED ); } Matrix<TConfig> &C = this->next_h->getA(); ValueTypeB alpha = types::util<ValueTypeB>::get_one(); if (this->m_error_scaling) { multiply(this->next_h->getA(), e, tmp); alpha = this->computeAlpha (e, bc, tmp); } // Apply correction on all equations. for (int i = 0; i < this->A->get_num_rows(); i++) { int I = this->m_aggregates[i]; for (int k = 0; k < this->A->get_block_dimy(); k++) { x[i * this->A->get_block_dimy() + k] = x[i * this->A->get_block_dimy() + k] + alpha * e[I * this->A->get_block_dimy() + k]; } } } // Prolongate the error on DEVICE using csr format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::prolongateAndApplyCorrection_1x1(Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &e, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &bc, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &x, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &tmp) { AMGX_CPU_PROFILER("Aggregation_AMG_Level::prolongate_and_apply_correction_1x1 "); ValueTypeB alpha = types::util<ValueTypeB>::get_one(); const int block_size = 64; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int) ( (this->A->get_num_rows() + block_size - 1) / block_size ) ); const IndexType *aggregates_ptr = this->m_aggregates.raw(); ValueTypeB *x_ptr = x.raw(); const ValueTypeB *e_ptr = e.raw(); if (this->m_error_scaling) { FatalError("error_scaling=1 is deprecated", AMGX_ERR_NOT_IMPLEMENTED ); } hipLaunchKernelGGL(( prolongateAndApplyCorrectionKernel) , dim3(num_blocks), dim3(block_size), 0, 0, alpha, (int)this->A->get_num_rows(), x_ptr, e_ptr, aggregates_ptr, this->m_num_aggregates); cudaCheckError(); } // Prolongate the error on DEVICE using block dia-csr format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::prolongateAndApplyCorrection_4x4(Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &ec, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &bf, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &xf, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &rf) { AMGX_CPU_PROFILER("Aggregation_AMG_Level::prolongate_and_apply_correction_4x4 "); if ( this->m_error_scaling >= 2 ) { if ( this->scale_counter > 0 ) { const 
IndexType *aggregates_ptr = this->m_aggregates.raw(); ValueTypeB *x_ptr = xf.raw(); const ValueTypeB *e_ptr = ec.raw(); const int block_size = 64; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int) ((this->A->get_num_rows() - 1) / block_size + 1)); hipLaunchKernelGGL(( prolongateAndApplyCorrectionBlockDiaCsrKernel) , dim3(num_blocks), dim3(block_size), 0, 0, this->scale, (int)this->getA().get_num_rows(), x_ptr, e_ptr, aggregates_ptr, this->m_num_aggregates, this->getA().get_block_dimy()); cudaCheckError(); this->scale_counter--; return; } bool vanek_scaling = this->m_error_scaling > 3; IndexType numRowsCoarse = this->next_d->getA().get_num_rows(); IndexType numRowsFine = this->A->get_num_rows(); IndexType blockdim = this->A->get_block_dimx(); if ( blockdim != this->A->get_block_dimy() ) { FatalError("Unsupported dimension for aggregation amg level", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } VVector ef( rf.size() ); VVector Aef( rf.size() ); ef.set_block_dimy( blockdim ); Aef.set_block_dimy( blockdim ); // prolongate e const int threads_per_block = 256; const int num_block_values = min( AMGX_GRID_MAX_SIZE, (numRowsFine * blockdim - 1) / threads_per_block + 1); const hipStream_t stream = nullptr; hipLaunchKernelGGL(( prolongateVector) , dim3(num_block_values), dim3(threads_per_block), 0, stream, this->m_aggregates.raw(), ec.raw(), ef.raw(), numRowsFine, numRowsCoarse, blockdim ); ef.dirtybit = 1; hipStreamSynchronize(stream); cudaCheckError(); int preSmooth; if ( vanek_scaling ) { preSmooth = this->amg->getNumPostsweeps(); } else { preSmooth = this->scaling_smoother_steps; } //smooth error this->smoother->setTolerance( 0.0 ); this->smoother->set_max_iters( preSmooth ); if ( vanek_scaling ) { thrust::fill( Aef.begin(), Aef.end(), types::util<ValueTypeB>::get_zero() ); cudaCheckError(); this->smoother->solve( Aef, ef, false ); //smooth correction with rhs 0 this->smoother->solve( bf, xf, false ); // smooth x with rhs residual //recompute residual int offset, size; this->getA().getOffsetAndSizeForView(OWNED, &offset, &size); axmb( this->getA(), xf, bf, rf, offset, size ); } else { this->smoother->solve( rf, ef, false ); //smooth correction with rhs residual } // multiply for lambda computation multiply(this->getA(), ef, Aef, OWNED); ValueTypeB nominator, denominator; int offset = 0, size = 0; this->A->getOffsetAndSizeForView(OWNED, &offset, &size); if ( this->m_error_scaling == 2 || this->m_error_scaling == 4 ) { // compute lambda=<rf,Aef>/<Aef,Aef> nominator = thrust::inner_product( rf.begin(), rf.end(), Aef.begin(), types::util<ValueTypeB>::get_zero() ); denominator = thrust::inner_product( Aef.begin(), Aef.end(), Aef.begin(), types::util<ValueTypeB>::get_zero() ); cudaCheckError(); } if ( this->m_error_scaling == 3 || this->m_error_scaling == 5) { // compute lambda=<rf,ef>/<ef,Aef> nominator = thrust::inner_product( rf.begin(), rf.begin() + size * blockdim, ef.begin(), types::util<ValueTypeB>::get_zero() ); denominator = thrust::inner_product( ef.begin(), ef.begin() + size * blockdim, Aef.begin(), types::util<ValueTypeB>::get_zero() ); if (!this->A->is_matrix_singleGPU()) { this->A->getManager()->global_reduce_sum(&nominator); this->A->getManager()->global_reduce_sum(&denominator); } cudaCheckError(); } if (types::util<ValueTypeB>::abs(denominator) == 0.0) { nominator = denominator = types::util<ValueTypeB>::get_one(); } // apply correction x <- x + lambda*e const int num_block_fine = min( AMGX_GRID_MAX_SIZE, (numRowsFine * blockdim - 1) / threads_per_block + 1 ); ValueTypeB alpha = nominator 
/ denominator; if ( types::util<ValueTypeB>::abs(alpha) < .3 ) { alpha = (alpha / types::util<ValueTypeB>::abs(alpha)) * .3; // it was this before: alpha = .3, which is not 100% equal } if ( types::util<ValueTypeB>::abs(alpha) > 10 ) { alpha = (alpha / types::util<ValueTypeB>::abs(alpha)) * 10.; // it was this before: alpha = 10., which is not 100% equal } hipLaunchKernelGGL(( applyCorrection) , dim3(num_block_fine), dim3(threads_per_block), 0, stream, alpha, ef.raw(), xf.raw(), numRowsFine * blockdim ); cudaCheckError(); this->scale_counter = this->reuse_scale; //reuse this scale scale_counter times this->scale = alpha; return; } ValueTypeB alpha = types::util<ValueTypeB>::get_one(); const int block_size = 64; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int) ((this->A->get_num_rows() - 1) / block_size + 1)); const IndexType *aggregates_ptr = this->m_aggregates.raw(); ValueTypeB *x_ptr = xf.raw(); const ValueTypeB *e_ptr = ec.raw(); if (this->m_error_scaling == 1) { FatalError("error_scaling=1 is deprecated", AMGX_ERR_NOT_IMPLEMENTED ); } hipLaunchKernelGGL(( prolongateAndApplyCorrectionBlockDiaCsrKernel) , dim3(num_blocks), dim3(block_size), 0, 0, alpha, (int)this->A->get_num_rows(), x_ptr, e_ptr, aggregates_ptr, this->m_num_aggregates, this->A->get_block_dimy()); cudaCheckError(); } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config >::prolongateAndApplyCorrection(VVector &e, VVector &bf, VVector &x, VVector &tmp) { Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); //this is dirty, but error scaling 2 and 3 do not have a specialized version. Instead, the general version sits in the 4x4 function if ( this->m_error_scaling >= 2 ) { prolongateAndApplyCorrection_4x4(e, bf, x, tmp); } else if (this->A->get_block_size() == 1) { prolongateAndApplyCorrection_1x1(e, bf, x, tmp); } else if (this->A->get_block_dimx() == this->A->get_block_dimy() ) { prolongateAndApplyCorrection_4x4(e, bf, x, tmp); } else { FatalError("Unsupported dimension for aggregation amg level", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } x.dirtybit = 1; if (!this->A->is_matrix_singleGPU() && x.delayed_send == 0) { if (x.in_transfer & RECEIVING) { this->A->manager->exchange_halo_wait(x, x.tag); } this->A->manager->exchange_halo_async(x, x.tag); } } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::restrictResidual(VVector &r, VVector &rr) { if (this->A->get_block_size() == 1) { restrictResidual_1x1(r, rr); } else if (this->A->get_block_dimx() == this->A->get_block_dimy() ) { restrictResidual_4x4(r, rr); } else { FatalError("Unsupported dimension for aggregation amg level", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } //TODO: check level transfer between host and device for multiGPU if (!this->A->is_matrix_singleGPU()) { Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); rr.dirtybit = 1; if (!Ac.is_matrix_singleGPU() && !this->isConsolidationLevel() && rr.delayed_send == 0) { Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); //TODO problem in memoryspace transfer is here if (rr.in_transfer & RECEIVING) { Ac.manager->exchange_halo_wait(rr, rr.tag); } Ac.manager->exchange_halo_async(rr, rr.tag); } } } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::computeRestrictionOperator() { if (this->A->get_block_size() == 1) { computeRestrictionOperator_1x1(); } else if (this->A->get_block_dimx() == 4 && this->A->get_block_dimy() == 4) { computeRestrictionOperator_4x4(); } else { this->computeRestrictionOperator_common(); } } template 
<typename IndexType> __global__ void coarse_to_global(IndexType *aggregates, IndexType *aggregates_global, IndexType *renumbering, IndexType num_elements, int64_t offset) { int element = blockIdx.x * blockDim.x + threadIdx.x; while (element < num_elements) { renumbering[aggregates[element]] = aggregates_global[element] + offset; //this won't be a problem, because we are overwriting the same thing element += blockDim.x * gridDim.x; } } template <typename T, typename IndexType> __global__ void export_matrix_elements(IndexType *row_offsets, IndexType *col_indices, T *values, IndexType *maps, IndexType *renumbering, IndexType *new_row_offsets, IndexType *new_col_indices, T *new_values, IndexType bsize, IndexType size) { int idx = blockIdx.x * blockDim.x / 32 + threadIdx.x / 32; int coopIdx = threadIdx.x % 32; while (idx < size) { int row = maps[idx]; INDEX_TYPE src_base = row_offsets[row]; INDEX_TYPE dst_base = new_row_offsets[idx]; for (int m = coopIdx; m < row_offsets[row + 1]*bsize - src_base * bsize; m += 32) { new_values[dst_base * bsize + m] = values[src_base * bsize + m]; } for (int m = coopIdx; m < row_offsets[row + 1] - src_base; m += 32) { new_col_indices[dst_base + m] = renumbering[col_indices[src_base + m]]; } idx += gridDim.x * blockDim.x / 32; } } template <class T> __global__ void export_matrix_diagonal(T *values, INDEX_TYPE bsize, INDEX_TYPE *maps, T *output, INDEX_TYPE size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; while (idx < size) { int row = maps[idx]; INDEX_TYPE src_base = row; INDEX_TYPE dst_base = idx; for (int m = 0; m < bsize; m++) { output[dst_base * bsize + m] = values[src_base * bsize + m]; } idx += gridDim.x * blockDim.x; } } __global__ void remove_boundary(INDEX_TYPE *flags, INDEX_TYPE *maps, INDEX_TYPE size) { int element = blockIdx.x * blockDim.x + threadIdx.x; while (element < size) { flags[maps[element]] = 0; //this won't be a problem, because we are overwriting the same thing element += blockDim.x * gridDim.x; } } __global__ void calc_inverse_renumbering(INDEX_TYPE *renum, INDEX_TYPE *irenum, INDEX_TYPE *renum_gbl, INDEX_TYPE base_index, INDEX_TYPE max_element) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < max_element) { irenum[renum[idx]] = renum_gbl[idx] - base_index; idx += blockDim.x * gridDim.x; } } __global__ void create_halo_mapping(INDEX_TYPE *mapping, INDEX_TYPE *node_list, INDEX_TYPE base_index, INDEX_TYPE map_offset, INDEX_TYPE size) { int row = blockIdx.x * blockDim.x + threadIdx.x; while (row < size) { int idx = node_list[row] - base_index; mapping[idx] = map_offset + row; row += blockDim.x * gridDim.x; } } __global__ void map_col_indices_and_count_rowlen(INDEX_TYPE *row_offsets, INDEX_TYPE *col_indices, INDEX_TYPE *row_length, INDEX_TYPE *renumbering, INDEX_TYPE *mapping, INDEX_TYPE *map_offsets, int64_t *index_ranges, INDEX_TYPE part_id, INDEX_TYPE my_id, INDEX_TYPE base_index, INDEX_TYPE my_range, INDEX_TYPE num_neighbors, INDEX_TYPE num_rows) { extern __shared__ volatile int reduction[]; int row = blockIdx.x * blockDim.x / 4 + threadIdx.x / 4; int coopIdx = threadIdx.x % 4; while (row < num_rows) { int valid = 0; for (int idx = row_offsets[row] + coopIdx; idx < row_offsets[row + 1]; idx += 4) //this may look horrible, but I expect low branch divergence, because col indices in a row usually belong to the same partition (or at most one more) { int colIdx = col_indices[idx]; int part = -2; if (colIdx >= index_ranges[2 * part_id] && colIdx < index_ranges[2 * part_id + 1]) //the col index probably belongs to the 
partition I am working on { part = part_id; } else if (colIdx >= base_index && colIdx < base_index + my_range) //or points back to the owned partition { part = -1; } else //or else it points to a third partition { for (int i = 0; i < num_neighbors; i++) { if (colIdx >= index_ranges[2 * i] && colIdx < index_ranges[2 * i + 1]) { part = i; } } } if (part == -2) { col_indices[idx] = -1; #ifdef DEBUG printf("Column index encountered that does not belong to any of my neighbors!! %d\n", colIdx); #endif } else { if (part == -1) { col_indices[idx] = renumbering[colIdx - base_index]; valid++; } else { int new_col_idx = mapping[map_offsets[part] + colIdx - index_ranges[2 * part]]; if (new_col_idx >= 0) { valid++; col_indices[idx] = new_col_idx; } else { col_indices[idx] = -1; } } } } reduction[threadIdx.x] = valid; for (int s = 2; s > 0; s >>= 1) { if (coopIdx < s) { reduction[threadIdx.x] += reduction[threadIdx.x + s]; } __syncthreads(); } if (coopIdx == 0) { row_length[row] = reduction[threadIdx.x]; } row += gridDim.x * blockDim.x / 4; } } __global__ void map_col_indices(INDEX_TYPE *row_offsets, INDEX_TYPE *col_indices, int64_t *halo_ranges, INDEX_TYPE *halo_renumbering, INDEX_TYPE *halo_rows, INDEX_TYPE *global_renumbering, INDEX_TYPE num_neighbors, INDEX_TYPE num_rows, INDEX_TYPE num_rows_processed) { int row = blockIdx.x * blockDim.x / 4 + threadIdx.x / 4; int coopIdx = threadIdx.x % 4; while (row < num_rows_processed) { for (int idx = row_offsets[row] + coopIdx; idx < row_offsets[row + 1]; idx += 4) { int colIdx = col_indices[idx]; int part = 0; if (colIdx < num_rows) { part = -1; } else { colIdx = global_renumbering[colIdx]; for (int i = 0; i < num_neighbors; i++) { if (colIdx >= halo_ranges[2 * i] && colIdx < halo_ranges[2 * i + 1]) { part = i; break; } } } if (part == -1) { col_indices[idx] = colIdx; } else { col_indices[idx] = halo_renumbering[halo_rows[part] + colIdx - halo_ranges[2 * part]]; } } row += gridDim.x * blockDim.x / 4; } } template <class T> __global__ void reorder_whole_matrix(INDEX_TYPE *old_rows, INDEX_TYPE *old_cols, T *old_vals, INDEX_TYPE *rows, INDEX_TYPE *cols, T *vals, INDEX_TYPE bsize, INDEX_TYPE num_rows) { int row = blockIdx.x * blockDim.x + threadIdx.x; while (row < num_rows) { INDEX_TYPE dst_row = row; INDEX_TYPE src_base = old_rows[row]; INDEX_TYPE dst = rows[dst_row]; for (int i = 0; i < old_rows[row + 1] - src_base; i++) { INDEX_TYPE colIdx = old_cols[src_base + i]; if (colIdx >= 0) { cols[dst] = colIdx; for (int j = 0; j < bsize; j++) { vals[dst * bsize + j] = old_vals[(src_base + i) * bsize + j]; } dst++; } } row += blockDim.x * gridDim.x; } } __global__ void calc_gbl_renumbering(INDEX_TYPE *inv_renum, INDEX_TYPE *gbl_renum, INDEX_TYPE size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { gbl_renum[inv_renum[idx]] = idx; idx += blockDim.x * gridDim.x; } } template <typename ValueType> __global__ void write_diagonals(ValueType *values, INDEX_TYPE *diag, INDEX_TYPE *map, ValueType *output, INDEX_TYPE bsize, INDEX_TYPE size) { int nzPerBlock = blockDim.x / bsize; int row = blockIdx.x * nzPerBlock + threadIdx.x / bsize; int vecIdx = threadIdx.x % bsize; if (threadIdx.x >= (blockDim.x / bsize)*bsize) { return; } while (row < size) { output[row * bsize + vecIdx] = values[diag[map[row]] * bsize + vecIdx]; row += gridDim.x * nzPerBlock; } } template <typename ValueType> __global__ void write_diagonals_back(ValueType *values, INDEX_TYPE *diag, ValueType *source, INDEX_TYPE bsize, INDEX_TYPE size) { int nzPerBlock = blockDim.x / bsize; int row = 
blockIdx.x * nzPerBlock + threadIdx.x / bsize; int vecIdx = threadIdx.x % bsize; if (threadIdx.x >= (blockDim.x / bsize)*bsize) { return; } while (row < size) { values[diag[row]*bsize + vecIdx] = source[row * bsize + vecIdx]; row += gridDim.x * nzPerBlock; } } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::prepareNextLevelMatrix_full(const Matrix<TConfig> &A, Matrix<TConfig> &Ac) { if (A.is_matrix_singleGPU()) { return; } int num_neighbors = A.manager->neighbors.size(); if (TConfig::memSpace == AMGX_host) { FatalError("Aggregation AMG Not implemented for host", AMGX_ERR_NOT_IMPLEMENTED); } else { int c_size = Ac.get_num_rows(); int f_size = A.get_num_rows(); int diag = Ac.hasProps(DIAG); if (A.manager->B2L_rings[0].size() > 2) { FatalError("Aggregation_AMG_Level prepareNextLevelMatrix not implemented >1 halo rings", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } //get coarse -> fine global renumbering IVector renumbering(c_size); int num_blocks = min(4096, (c_size + 127) / 128); hipLaunchKernelGGL(( coarse_to_global) , dim3(num_blocks), dim3(128), 0, 0, this->m_aggregates.raw(), this->m_aggregates_fine_idx.raw(), renumbering.raw(), f_size, 0); cudaCheckError(); // // Step 0 - form halo matrices that are exported to neighbors // std::vector<Matrix<TConfig> > halo_rows(num_neighbors); std::vector<DistributedManager<TConfig> > halo_btl(num_neighbors); for (int i = 0; i < num_neighbors; i++ ) { int num_unique = Ac.manager->B2L_rings[i][1]; //prepare export halo matrices halo_btl[i].resize(1, 1); halo_btl[i].set_global_id(Ac.manager->global_id()); halo_btl[i].B2L_maps[0].resize(num_unique); halo_btl[i].B2L_rings[0].resize(2); halo_btl[i].B2L_rings[0][0] = 0; halo_btl[i].B2L_rings[0][1] = num_unique; halo_btl[i].set_index_range(A.manager->index_range()); halo_btl[i].set_base_index(A.manager->base_index()); //global indices of rows of the halo matrix thrust::copy(thrust::make_permutation_iterator( renumbering.begin(), Ac.manager->B2L_maps[i].begin()), thrust::make_permutation_iterator( renumbering.begin(), Ac.manager->B2L_maps[i].begin() + num_unique), halo_btl[i].B2L_maps[0].begin()); cudaCheckError(); halo_rows[i].addProps(CSR); if (diag) { halo_rows[i].addProps(DIAG); } //calculate row length and row_offsets halo_rows[i].row_offsets.resize(num_unique + 1); thrust::transform(thrust::make_permutation_iterator(Ac.row_offsets.begin() + 1, Ac.manager->B2L_maps[i].begin()), thrust::make_permutation_iterator(Ac.row_offsets.begin() + 1, Ac.manager->B2L_maps[i].end()), thrust::make_permutation_iterator(Ac.row_offsets.begin(), Ac.manager->B2L_maps[i].begin()), halo_rows[i].row_offsets.begin(), thrust::minus<IndexType>()); cudaCheckError(); thrust::exclusive_scan(halo_rows[i].row_offsets.begin(), halo_rows[i].row_offsets.end(), halo_rows[i].row_offsets.begin()); cudaCheckError(); //resize halo matrix IndexType num_nz = halo_rows[i].row_offsets[num_unique]; halo_rows[i].resize(num_unique, num_unique, num_nz, Ac.get_block_dimy(), Ac.get_block_dimx(), 1); //copy relevant rows and renumber their column indices num_blocks = min(4096, (num_unique + 127) / 128); hipLaunchKernelGGL(( export_matrix_elements) , dim3(num_blocks), dim3(128), 0, 0, Ac.row_offsets.raw(), Ac.col_indices.raw(), Ac.values.raw(), Ac.manager->B2L_maps[i].raw(), renumbering.raw(), halo_rows[i].row_offsets.raw(), halo_rows[i].col_indices.raw(), halo_rows[i].values.raw(), A.get_block_size(), num_unique); cudaCheckError(); if (diag) { hipLaunchKernelGGL(( export_matrix_diagonal) , dim3(num_blocks), dim3(128), 0, 0, Ac.values.raw() 
+ Ac.row_offsets[Ac.get_num_rows()]*Ac.get_block_size(), Ac.get_block_size(), Ac.manager->B2L_maps[i].raw(), halo_rows[i].values.raw() + halo_rows[i].row_offsets[halo_rows[i].get_num_rows()]*Ac.get_block_size(), num_unique); cudaCheckError(); } } Ac.manager->getComms()->exchange_matrix_halo(halo_rows, halo_btl, Ac); //--------------------- renumbering/reordering matrix, integrating halo ----------------------------- Ac.set_initialized(0); //number of owned rows c_size = Ac.manager->halo_offsets[0]; f_size = A.manager->halo_offsets[0]; num_blocks = min(4096, (c_size + 511) / 512); int rings = 1; // // Step 1 - calculate inverse renumbering (to global indices - base_index) // Ac.manager->inverse_renumbering.resize(c_size); thrust::transform(renumbering.begin(), renumbering.begin() + c_size, thrust::constant_iterator<IndexType>(A.manager->base_index()), Ac.manager->inverse_renumbering.begin(), thrust::minus<IndexType>()); cudaCheckError(); //big renumbering table for going from global index to owned local index IVector global_to_coarse_local(Ac.manager->index_range()); thrust::fill(global_to_coarse_local.begin(), global_to_coarse_local.begin() + Ac.manager->index_range(), -1); cudaCheckError(); hipLaunchKernelGGL(( calc_gbl_renumbering) , dim3(num_blocks), dim3(512), 0, 0, Ac.manager->inverse_renumbering.raw(), global_to_coarse_local.raw(), c_size); cudaCheckError(); Ac.manager->set_num_halo_rows(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1] - c_size); cudaCheckError(); // // Step 2 - create big mapping table of all halo indices we received (this may use a little too much memory sum(fine nodes per neighbor) // thrust::host_vector<INDEX_TYPE> neighbor_rows(num_neighbors + 1); int max_num_rows = 0; for (int i = 0; i < num_neighbors; i++) { neighbor_rows[i] = halo_rows[i].manager->index_range(); max_num_rows = max_num_rows > halo_rows[i].get_num_rows() ? 
max_num_rows : halo_rows[i].get_num_rows(); } thrust::exclusive_scan(neighbor_rows.begin(), neighbor_rows.end(), neighbor_rows.begin()); cudaCheckError(); int total_rows_of_neighbors = neighbor_rows[num_neighbors]; IVector halo_mapping(total_rows_of_neighbors); thrust::fill(halo_mapping.begin(), halo_mapping.end(), -1); cudaCheckError(); for (int ring = 0; ring < rings; ring++) { for (int i = 0; i < num_neighbors; i++) { int size = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring]; int num_blocks = min(4096, (size + 127) / 128); hipLaunchKernelGGL(( create_halo_mapping) , dim3(num_blocks), dim3(128), 0, 0, halo_mapping.raw() + neighbor_rows[i], halo_btl[i].B2L_maps[0].raw() + halo_btl[i].B2L_rings[0][ring], halo_btl[i].base_index(), Ac.manager->halo_offsets[ring * num_neighbors + i], size); } } cudaCheckError(); // // Step 3 - renumber halo matrices and calculate row length (to eventually append to the big matrix) // INDEX_TYPE owned_nnz = Ac.row_offsets[c_size]; IVector neighbor_rows_d(num_neighbors + 1); thrust::copy(neighbor_rows.begin(), neighbor_rows.end(), neighbor_rows_d.begin()); cudaCheckError(); //map column indices of my own matrix (the ones that point outward) hipLaunchKernelGGL(( map_col_indices) , dim3(num_blocks), dim3(512), 0, 0, Ac.row_offsets.raw() + Ac.manager->num_interior_nodes(), Ac.col_indices.raw(), Ac.manager->halo_ranges.raw(), halo_mapping.raw(), neighbor_rows_d.raw(), renumbering.raw(), num_neighbors, c_size, c_size - Ac.manager->num_interior_nodes()); cudaCheckError(); IVector temp_row_len(max_num_rows); for (int i = 0; i < num_neighbors; i++) { //map column indices of halo matrices int size = halo_rows[i].get_num_rows(); int num_blocks = min(4096, (size + 127) / 128); hipLaunchKernelGGL(( map_col_indices_and_count_rowlen) , dim3(num_blocks), dim3(128), 128 * sizeof(INDEX_TYPE), 0, halo_rows[i].row_offsets.raw(), halo_rows[i].col_indices.raw(), temp_row_len.raw(), global_to_coarse_local.raw(), halo_mapping.raw(), neighbor_rows_d.raw(), Ac.manager->halo_ranges.raw(), i, Ac.manager->global_id(), Ac.manager->base_index(), Ac.manager->index_range(), num_neighbors, size); for (int ring = 0; ring < rings; ring++) { thrust::copy(temp_row_len.begin() + halo_btl[i].B2L_rings[0][ring], temp_row_len.begin() + halo_btl[i].B2L_rings[0][ring + 1], Ac.row_offsets.begin() + Ac.manager->halo_offsets[ring * num_neighbors + i]); } } cudaCheckError(); INDEX_TYPE old_nnz = Ac.row_offsets[Ac.row_offsets.size() - 1]; thrust::exclusive_scan(Ac.row_offsets.begin() + c_size, Ac.row_offsets.end(), Ac.row_offsets.begin() + c_size, owned_nnz); cudaCheckError(); // // Step 4 - consolidate column indices and values // int new_nnz = Ac.row_offsets[Ac.row_offsets.size() - 1]; Ac.col_indices.resize(new_nnz); Ac.values.resize((new_nnz + 1 + diag * (Ac.row_offsets.size() - 2)) * A.get_block_size()); if (diag) { MVector diags(c_size * Ac.get_block_size()); thrust::copy(Ac.values.begin() + old_nnz * Ac.get_block_size(), Ac.values.begin() + old_nnz * Ac.get_block_size() + c_size * Ac.get_block_size(), diags.begin()); thrust::copy(diags.begin(), diags.begin() + c_size * Ac.get_block_size(), Ac.values.begin() + Ac.row_offsets[Ac.get_num_rows()]*Ac.get_block_size()); cudaCheckError(); } int cumulative_num_rows = c_size; for (int i = 0; i < num_neighbors; i++) { for (int ring = 0; ring < rings; ring++) { int num_rows = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring]; int num_blocks = min(4096, (num_rows + 127) / 128); hipLaunchKernelGGL(( reorder_whole_matrix) , 
dim3(num_blocks), dim3(128), 0, 0, halo_rows[i].row_offsets.raw() + halo_btl[i].B2L_rings[0][ring], halo_rows[i].col_indices.raw(), halo_rows[i].values.raw(), Ac.row_offsets.raw() + Ac.manager->halo_offsets[ring * num_neighbors + i], Ac.col_indices.raw(), Ac.values.raw(), Ac.get_block_size(), num_rows); if (diag) { thrust::copy(halo_rows[i].values.begin() + (halo_rows[i].row_offsets[halo_rows[i].get_num_rows()] + halo_btl[i].B2L_rings[0][ring])*Ac.get_block_size(), halo_rows[i].values.begin() + (halo_rows[i].row_offsets[halo_rows[i].get_num_rows()] + halo_btl[i].B2L_rings[0][ring + 1])*Ac.get_block_size(), Ac.values.begin() + (Ac.row_offsets[Ac.get_num_rows()] + cumulative_num_rows)*Ac.get_block_size()); cumulative_num_rows += num_rows; } } } cudaCheckError(); Ac.set_num_cols(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1]); Ac.set_num_rows(Ac.get_num_cols()); Ac.set_num_nz(new_nnz); Ac.delProps(COO); Ac.set_initialized(1); Ac.computeDiagonal(); } } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::prepareNextLevelMatrix_diag(const Matrix<TConfig> &A, Matrix<TConfig> &Ac) { if (A.is_matrix_singleGPU()) { return; } int num_neighbors = A.manager->neighbors.size(); if (TConfig::memSpace == AMGX_host) { FatalError("Aggregation AMG Not implemented for host", AMGX_ERR_NOT_IMPLEMENTED); } else { int c_size = Ac.manager->halo_offsets[0]; int f_size = A.manager->halo_offsets[0]; int diag = Ac.hasProps(DIAG); Ac.manager->inverse_renumbering.resize(c_size); //get coarse -> fine renumbering int num_blocks = min(4096, (c_size + 127) / 128); hipLaunchKernelGGL(( coarse_to_global) , dim3(num_blocks), dim3(128), 0, 0, this->m_aggregates.raw(), this->m_aggregates_fine_idx.raw(), Ac.manager->inverse_renumbering.raw(), f_size, -1 * A.manager->base_index()); cudaCheckError(); Ac.manager->set_num_halo_rows(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1] - c_size); if (!diag) { Ac.computeDiagonal(); } Ac.set_initialized(1); std::vector<MVector> diagonals(num_neighbors); for (int i = 0; i < num_neighbors; i++) { int size = Ac.manager->B2L_rings[i][Ac.manager->B2L_rings.size() - 1]; diagonals[i].resize(Ac.get_block_size()*size); int num_blocks = min(4096, (size + 127) / 128); hipLaunchKernelGGL(( write_diagonals) , dim3(num_blocks), dim3(128), 0, 0, Ac.values.raw(), Ac.diag.raw(), Ac.manager->B2L_maps[i].raw(), diagonals[i].raw(), Ac.get_block_size(), size); } cudaCheckError(); Ac.manager->getComms()->exchange_vectors(diagonals, Ac, this->tag * 100 + 10 + 2); for (int i = 0; i < num_neighbors; i++) { int size = Ac.manager->halo_offsets[i + 1] - Ac.manager->halo_offsets[i]; if (Ac.hasProps(DIAG)) { thrust::copy(diagonals[i].begin(), diagonals[i].begin() + Ac.get_block_size()*size, Ac.values.begin() + Ac.get_block_size() * (Ac.diagOffset() + Ac.manager->halo_offsets[i])); } else { int num_blocks = min(4096, (size + 127) / 128); hipLaunchKernelGGL(( write_diagonals_back) , dim3(num_blocks), dim3(128), 0, 0, Ac.values.raw(), Ac.diag.raw() + Ac.manager->halo_offsets[i], diagonals[i].raw(), Ac.get_block_size(), size); } } cudaCheckError(); } } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::prepareNextLevelMatrix_none(const Matrix<TConfig> &A, Matrix<TConfig> &Ac) { if (A.is_matrix_singleGPU()) { return; } int num_neighbors = A.manager->neighbors.size(); if (TConfig::memSpace == AMGX_host) { FatalError("Aggregation AMG Not implemented for host", AMGX_ERR_NOT_IMPLEMENTED); } else { int c_size = Ac.manager->halo_offsets[0]; int f_size = 
A.manager->halo_offsets[0]; int diag = Ac.hasProps(DIAG); Ac.manager->inverse_renumbering.resize(c_size); //get coarse -> fine renumbering int num_blocks = min(4096, (c_size + 127) / 128); hipLaunchKernelGGL(( coarse_to_global) , dim3(num_blocks), dim3(128), 0, 0, this->m_aggregates.raw(), this->m_aggregates_fine_idx.raw(), Ac.manager->inverse_renumbering.raw(), f_size, -1 * A.manager->base_index()); cudaCheckError(); Ac.manager->set_num_halo_rows(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1] - c_size); Ac.set_initialized(1); if (!diag) { Ac.computeDiagonal(); } } } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::prepareNextLevelMatrix(const Matrix<TConfig> &A, Matrix<TConfig> &Ac) { if (m_matrix_halo_exchange == 0) { this->prepareNextLevelMatrix_none(A, Ac); } else if (m_matrix_halo_exchange == 1) { this->prepareNextLevelMatrix_diag(A, Ac); } else if (m_matrix_halo_exchange == 2) { this->prepareNextLevelMatrix_full(A, Ac); } else { FatalError("Invalid Aggregation matrix_halo_exchange parameter", AMGX_ERR_NOT_IMPLEMENTED); } } __global__ void set_halo_rowlen(INDEX_TYPE *work, INDEX_TYPE *output, INDEX_TYPE size, INDEX_TYPE diag) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { if (work[idx + 1] - work[idx] > 0) { output[idx] += work[idx + 1] - work[idx] - (1 - diag); } idx += blockDim.x * gridDim.x; } } template <typename T> __global__ void append_halo_nz(INDEX_TYPE *row_offsets, INDEX_TYPE *new_row_offsets, INDEX_TYPE *col_indices, INDEX_TYPE *new_col_indices, T *values, T *new_values, INDEX_TYPE size, INDEX_TYPE diag, INDEX_TYPE halo_offset, INDEX_TYPE block_size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { int add_diag = !diag; if (!diag && new_col_indices[new_row_offsets[idx]] != -1) { add_diag = 0; } //if diag or there is already soimething in the row, then don't add diagonal nonzero (inside diag) int append_offset = -1; for (int i = new_row_offsets[idx]; i < new_row_offsets[idx + 1]; i++) { if (new_col_indices[i] == -1) {append_offset = i; break;} } for (int i = row_offsets[idx]; i < row_offsets[idx + 1]; i++) { if (diag && i == row_offsets[idx]) //if outside diag and this is the first nonzero in a non-empty row, overwrite diagonal value { for (int j = 0; j < block_size; j++) { new_values[(new_row_offsets[size] + halo_offset + idx)*block_size + j] = values[(row_offsets[size] + halo_offset + idx) * block_size + j]; } } int col_idx = col_indices[i]; if (append_offset == -1 && (col_idx != halo_offset + idx)) {printf("ERROR: append offset is -1 but row has nonzeros in it old %d to %d new %d to %d\n", row_offsets[idx], row_offsets[idx + 1], new_row_offsets[idx], new_row_offsets[idx + 1]); append_offset = 0;} if (col_idx != halo_offset + idx || add_diag) { new_col_indices[append_offset] = col_idx; for (int j = 0; j < block_size; j++) { new_values[append_offset * block_size + j] = values[i * block_size + j]; } append_offset++; } } idx += blockDim.x * gridDim.x; } } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::createCoarseB2LMaps(std::vector<IVector> &in_coarse_B2L_maps) { Matrix<TConfig> &A = this->getA(); m_num_all_aggregates = m_num_aggregates; int num_neighbors = A.manager->neighbors.size(); IndexType max_b2l = 0; for (int i = 0; i < num_neighbors; i++ ) { max_b2l = max_b2l > A.manager->B2L_rings[i][1] ? 
max_b2l : A.manager->B2L_rings[i][1]; } IVector B2L_aggregates(max_b2l); IVector indices(max_b2l); for (int i = 0; i < num_neighbors; i++ ) { int size = A.manager->B2L_rings[i][1]; thrust::fill(B2L_aggregates.begin(), B2L_aggregates.begin() + size, 0); thrust::sequence(indices.begin(), indices.begin() + size); //substitute coarse aggregate indices for fine boundary nodes thrust::copy(thrust::make_permutation_iterator(this->m_aggregates.begin(), A.manager->B2L_maps[i].begin()), thrust::make_permutation_iterator(this->m_aggregates.begin(), A.manager->B2L_maps[i].begin() + size), B2L_aggregates.begin()); //find the unique ones thrust::sort_by_key(B2L_aggregates.begin(), B2L_aggregates.begin() + size, indices.begin()); IndexType num_unique = thrust::unique_by_key(B2L_aggregates.begin(), B2L_aggregates.begin() + size, indices.begin()).first - B2L_aggregates.begin(); in_coarse_B2L_maps[i].resize(num_unique); //sort it back so we have the original ordering thrust::sort_by_key(indices.begin(), indices.begin() + num_unique, B2L_aggregates.begin()); thrust::copy(B2L_aggregates.begin(), B2L_aggregates.begin() + num_unique, in_coarse_B2L_maps[i].begin()); } cudaCheckError(); } __global__ void populate_coarse_boundary(INDEX_TYPE *flags, INDEX_TYPE *indices, INDEX_TYPE *maps, INDEX_TYPE *output, INDEX_TYPE size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { output[flags[maps[indices[idx]]]] = maps[indices[idx]]; idx += blockDim.x * gridDim.x; } } __global__ void flag_coarse_boundary(INDEX_TYPE *flags, INDEX_TYPE *indices, INDEX_TYPE *maps, INDEX_TYPE size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { flags[maps[indices[idx]]] = 1; idx += blockDim.x * gridDim.x; } } __global__ void flag_halo_indices(INDEX_TYPE *flags, INDEX_TYPE *indices, INDEX_TYPE offset, INDEX_TYPE size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { flags[indices[idx] - offset] = 1; idx += blockDim.x * gridDim.x; } } __global__ void apply_halo_aggregate_indices(INDEX_TYPE *flags, INDEX_TYPE *indices, INDEX_TYPE *output, INDEX_TYPE offset, INDEX_TYPE aggregates_offset, INDEX_TYPE size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { output[idx] = flags[indices[idx] - offset] + aggregates_offset; idx += blockDim.x * gridDim.x; } } // renumbering the aggregates/communicationg with neighbors template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::setNeighborAggregates() { Matrix<TConfig> &A = this->getA(); Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); m_num_all_aggregates = m_num_aggregates; /* WARNING: the matrix reordering always happens inside createRenumbering routine. There are three ways to get to this routine 1. matrix_upload_all -> uploadMatrix -> initializeUploadReorderAll -> reorder_matrix -> createRenumbering 2. read_system_distributed -> renumberMatrixOneRing -> reorder_matrix_owned -> createRenumbering 3. solver_setup -> ... -> AMG_Level::setup -> createCoarseMatrices -> setNeighborAggregates -> createRenumbering If you are reading the renumbering from file you might need to add intercept code in if statement below, otherwise this routine will exit before calling createRenumbering routine (in case of single or disjoint partitions). 
*/ if (this->getA().is_matrix_singleGPU()) { return; } int num_neighbors = A.manager->neighbors.size(); // // Step 0 - set up coarse matrix metadata // if (Ac.manager == NULL) { Ac.manager = new DistributedManager<T_Config>(); } Ac.manager->resize(A.manager->neighbors.size(), 1); Ac.manager->A = &Ac; int f_size = A.get_num_rows(); Ac.manager->setComms(A.manager->getComms()); Ac.manager->set_global_id(A.manager->global_id()); Ac.manager->neighbors = A.manager->neighbors; Ac.manager->set_base_index(A.manager->base_index()); Ac.manager->halo_ranges = A.manager->halo_ranges; Ac.manager->set_index_range(A.manager->index_range()); //-------------------------------------- Section 1 - renumbering ----------------------------------------------------------- // // Step 1 - calculate coarse level B2L maps - any aggregate that has a fine boundary node, becomes a coarse boundary node // m_num_all_aggregates = m_num_aggregates; int vec_size = m_num_aggregates + 1; //A.manager->num_boundary_nodes()+1; IVector B2L_aggregates(vec_size); for (int i = 0; i < A.manager->neighbors.size(); i++) { thrust::fill(B2L_aggregates.begin(), B2L_aggregates.begin() + vec_size, 0); int size = A.manager->B2L_rings[i][1]; int block_size = 128; int grid_size = ::min( 4096, ( size + block_size - 1 ) / block_size); hipLaunchKernelGGL(( flag_coarse_boundary) , dim3(grid_size), dim3(block_size), 0, 0, B2L_aggregates.raw(), A.manager->B2L_maps[i].raw(), this->m_aggregates.raw(), size); thrust::exclusive_scan(B2L_aggregates.begin(), B2L_aggregates.begin() + vec_size, B2L_aggregates.begin()); (Ac.manager->B2L_maps)[i].resize(B2L_aggregates[vec_size - 1]); hipLaunchKernelGGL(( populate_coarse_boundary) , dim3(grid_size), dim3(block_size), 0, 0, B2L_aggregates.raw(), A.manager->B2L_maps[i].raw(), this->m_aggregates.raw(), Ac.manager->B2L_maps[i].raw(), size); } cudaCheckError(); for (int i = 0; i < num_neighbors; i++) { Ac.manager->B2L_rings[i].resize(2); Ac.manager->B2L_rings[i][0] = 0; Ac.manager->B2L_rings[i][1] = Ac.manager->B2L_maps[i].size(); } DistributedArranger<T_Config> *prep = new DistributedArranger<T_Config>; prep->initialize_B2L_maps_offsets(Ac, 1); delete prep; Ac.set_num_rows(m_num_aggregates); IVector renumbering(m_num_aggregates + 1); /* +1 is actually not needed, it will be resized in createRenumbering */ Ac.manager->createRenumbering(renumbering); // // Step 2 - renumber aggregates, so boundary nodes will have higher index than interior ones (based on the renumberiong we have been calculating) // /* WARNING: 1. Thrust scatter and gather routines seem more appropriate here, but they implicitly assume that the input and output have certain size correlation, which is not matched by vectors in our case. The only remaining option is to use make_permutation as is done below. Example of Thrust scatter and gather calls IVector ttt(f_size,-1); thrust::scatter(this->m_aggregates.begin(), this->m_aggregates.begin()+f_size, renumbering.begin(), ttt.begin()); thrust::gather(renumbering.begin(), renumbering.end(), this->m_aggregates.begin(), ttt.begin()); thrust::copy(ttt.begin(), ttt.end(), this->m_aggregates.begin()); 2. The original thrust composite call is illegal because it uses the same array (m_aggregates) for input and output. 
          thrust::copy(thrust::make_permutation_iterator(renumbering.begin(), this->m_aggregates.begin()),
                       thrust::make_permutation_iterator(renumbering.begin(), this->m_aggregates.begin()+f_size),
                       this->m_aggregates.begin());
          Although it somehow still works, it is much safer to use explicit temporary storage for the intermediate result. */
    /* WARNING: must save unreordered aggregates for later use before reordering them. */
    IVector unreordered_aggregates(this->m_aggregates);
    /* WARNING: change Thrust call to explicitly use temporary storage for the intermediate result.
       The earlier version is illegal, but somehow still works. */
    IVector ttt(f_size, -1);
    thrust::copy(thrust::make_permutation_iterator(renumbering.begin(), this->m_aggregates.begin()),
                 thrust::make_permutation_iterator(renumbering.begin(), this->m_aggregates.begin() + f_size),
                 ttt.begin());
    thrust::copy(ttt.begin(), ttt.end(), this->m_aggregates.begin());
    cudaCheckError();
    //we don't need renumbering anymore, it will be identity on the coarse level
    //-------------------------------------- Section 2 - communication -----------------------------------------------------------
    //
    // Step 3 - populate aggregates_fine_idx, which stores for every fine node the original global index of the aggregate (which is the lowest global index of the nodes aggregated together)
    //
    //
    // These are different when we do / don't do matrix halo exchanges - when we do, we need global indices to match nodes,
    // and in this case Ac after computeA will not have the same ordering of halo nodes as after prepareNextLevel_full.
    // However, when we do not do the matrix halo exchange we are only interested in the ordering of halo nodes on the coarse level,
    // and we can get that by exchanging the (already renumbered) aggregates vector.
    //
    if (m_matrix_halo_exchange == 2)
    {
        //Find original global indices of nodes that have the minimum id in the aggregates.
        thrust::copy(thrust::make_permutation_iterator(A.manager->inverse_renumbering.begin(), this->m_aggregates_fine_idx.begin()),
                     thrust::make_permutation_iterator(A.manager->inverse_renumbering.begin(), this->m_aggregates_fine_idx.begin() + f_size),
                     this->m_aggregates_fine_idx.begin());
        thrust::transform(this->m_aggregates_fine_idx.begin(),
                          this->m_aggregates_fine_idx.begin() + f_size,
                          thrust::constant_iterator<IndexType>(A.manager->base_index()),
                          this->m_aggregates_fine_idx.begin(),
                          thrust::plus<IndexType>());
        //communicate
        this->m_aggregates_fine_idx.set_block_dimx(1);
        this->m_aggregates_fine_idx.set_block_dimy(1);
        m_aggregates_fine_idx.dirtybit = 1;
        A.manager->exchange_halo(m_aggregates_fine_idx, this->tag * 100 + 1 * 10 + 0);
    }
    else
    {
        //communicate
        this->m_aggregates.set_block_dimx(1);
        this->m_aggregates.set_block_dimy(1);
        m_aggregates.dirtybit = 1;
        /* WARNING: you should exchange unreordered aggregates, and append them to your own reordered aggregates,
           to conform to assumptions made by distributed_manager. */
        //A.manager->exchange_halo(m_aggregates, this->tag*100+1*10+0); //wrong
        A.manager->exchange_halo(unreordered_aggregates, this->tag * 100 + 1 * 10 + 0);
        thrust::copy(unreordered_aggregates.begin() + f_size, unreordered_aggregates.end(), this->m_aggregates.begin() + f_size);
    }

    cudaCheckError();
    //
    // Step 4 - consolidate neighbors' aggregates into own list to be able to perform Galerkin product with the n-ring halo
    //
    IVector &exchanged_aggregates = m_matrix_halo_exchange == 2 ?
this->m_aggregates_fine_idx : this->m_aggregates; int min_index = thrust::reduce(exchanged_aggregates.begin() + A.manager->halo_offsets[0], exchanged_aggregates.begin() + A.manager->halo_offsets[num_neighbors], (int)0xFFFFFFF, thrust::minimum<int>()); int max_index = thrust::reduce(exchanged_aggregates.begin() + A.manager->halo_offsets[0], exchanged_aggregates.begin() + A.manager->halo_offsets[num_neighbors], (int)0, thrust::maximum<int>()); cudaCheckError(); int s_size = max_index - min_index + 2; IVector scratch(s_size); for (int i = 0; i < num_neighbors; i++) { int size = A.manager->halo_offsets[i + 1] - A.manager->halo_offsets[i]; //Could also use local minimums to perform the same operation. The results are the same. //int min_local = thrust::reduce(exchanged_aggregates.begin()+A.manager->halo_offsets[i], exchanged_aggregates.begin()+A.manager->halo_offsets[i+1], (int)0xFFFFFFF, thrust::minimum<int>()); thrust::fill(scratch.begin(), scratch.begin() + s_size, 0); int block_size = 128; int grid_size = ::min( 4096, ( size + block_size - 1 ) / block_size); hipLaunchKernelGGL(( flag_halo_indices) , dim3(grid_size), dim3(block_size), 0, 0, scratch.raw(), exchanged_aggregates.raw() + A.manager->halo_offsets[i], min_index /*min_local*/, size); thrust::exclusive_scan(scratch.begin(), scratch.begin() + s_size, scratch.begin()); hipLaunchKernelGGL(( apply_halo_aggregate_indices) , dim3(grid_size), dim3(block_size), 0, 0, scratch.raw(), exchanged_aggregates.raw() + A.manager->halo_offsets[i], this->m_aggregates.raw() + A.manager->halo_offsets[i], min_index /*min_local*/, m_num_all_aggregates, size); Ac.manager->halo_offsets[i] = m_num_all_aggregates; m_num_all_aggregates += scratch[s_size - 1]; } cudaCheckError(); Ac.manager->halo_offsets[num_neighbors] = m_num_all_aggregates; } //TODO: The consolidate and unconsolidate parts could be made more efficient by only sending the // nonzero values template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::consolidateVector(VVector &x) { int my_id = this->getA().manager->global_id(); if (this->getA().manager->isRootPartition()) { // Here all partitions being consolidated should have same vector size, see TODO above INDEX_TYPE num_parts = this->getA().manager->getNumPartsToConsolidate(); for (int i = 0; i < num_parts; i++) { int current_part = this->getA().manager->getPartsToConsolidate()[i]; // Vector has been set to correct size if (current_part != my_id) { //printf("Root partition %d receiving %d -> %d and %d -> %d (total %d)\n", this->getA().manager->global_id(), this->getA().manager->getConsolidationArrayOffsets()[i], this->getA().manager->getConsolidationArrayOffsets()[i+1], this->getA().manager->getConsolidationArrayOffsets()[num_parts+i], this->getA().manager->getConsolidationArrayOffsets()[num_parts+i+1], (int)x.size()/x.get_block_size()); this->getA().manager->getComms()->recv_vector(x, current_part, 10000 + current_part, x.get_block_size()*this->getA().manager->getConsolidationArrayOffsets()[i], x.get_block_size() * (this->getA().manager->getConsolidationArrayOffsets()[i + 1] - this->getA().manager->getConsolidationArrayOffsets()[i])); this->getA().manager->getComms()->recv_vector(x, current_part, 20000 + current_part, x.get_block_size()*this->getA().manager->getConsolidationArrayOffsets()[num_parts + i], x.get_block_size() * (this->getA().manager->getConsolidationArrayOffsets()[num_parts + i + 1] - this->getA().manager->getConsolidationArrayOffsets()[num_parts + i])); } } } else { int my_destination_part = 
this->getA().manager->getMyDestinationPartition(); int i_off, i_size, b_off, b_size; this->getA().manager->getConsolidationOffsets(&i_off, &i_size, &b_off, &b_size); // Here all partitions being consolidated should have same vector size, see TODO above this->getA().manager->getComms()->send_vector_async(x, my_destination_part, 10000 + my_id, i_off * x.get_block_size(), i_size * x.get_block_size()); this->getA().manager->getComms()->send_vector_async(x, my_destination_part, 20000 + my_id, b_off * x.get_block_size(), b_size * x.get_block_size()); } } //TODO: The consolidate and unconsolidate parts could be made more efficient by only sending the // nonzero values template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::unconsolidateVector(VVector &x) { if (this->getA().manager->isRootPartition()) { INDEX_TYPE num_parts = this->getA().manager->getNumPartsToConsolidate(); for (int i = 0; i < num_parts; i++) { int current_part = this->getA().manager->getPartsToConsolidate()[i]; // Vector has been set to correct size if (current_part != this->getA().manager->global_id()) { this->getA().manager->getComms()->send_vector_async(x, current_part, 30000 + current_part, x.get_block_size()*this->getA().manager->getConsolidationArrayOffsets()[i], x.get_block_size() * (this->getA().manager->getConsolidationArrayOffsets()[i + 1] - this->getA().manager->getConsolidationArrayOffsets()[i])); this->getA().manager->getComms()->send_vector_async(x, current_part, 40000 + current_part, x.get_block_size()*this->getA().manager->getConsolidationArrayOffsets()[num_parts + i], x.get_block_size() * (this->getA().manager->getConsolidationArrayOffsets()[num_parts + i + 1] - this->getA().manager->getConsolidationArrayOffsets()[num_parts + i])); } } } else { int my_destination_part = this->getA().manager->getMyDestinationPartition(); // Vector x is of unknown size int i_off, i_size, b_off, b_size; this->getA().manager->getConsolidationOffsets(&i_off, &i_size, &b_off, &b_size); this->getA().manager->getComms()->recv_vector(x, my_destination_part, 30000 + this->getA().manager->global_id(), i_off * x.get_block_size(), i_size * x.get_block_size()); this->getA().manager->getComms()->recv_vector(x, my_destination_part, 40000 + this->getA().manager->global_id(), b_off * x.get_block_size(), b_size * x.get_block_size()); } } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::createCoarseVertices() { profileSubphaseFindAggregates(); //Set the aggregates this->Profile.tic("setAggregates"); this->m_selector->setAggregates(this->getA(), this->m_aggregates, this->m_aggregates_fine_idx, this->m_num_aggregates); this->Profile.toc("setAggregates"); if ( this->m_print_aggregation_info ) { this->m_selector->printAggregationInfo( this->m_aggregates, this->m_aggregates_fine_idx, this->m_num_aggregates ); } this->getA().template setParameter< int > ("aggregates_num", this->m_num_aggregates); // ptr to aaggregates } // Creating the next level template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::createCoarseMatrices() { Matrix<TConfig> &A = this->getA(); Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); profileSubphaseFindAggregates(); int num_parts, num_fine_neighbors, my_id; if (!A.is_matrix_singleGPU()) { num_parts = A.manager->getComms()->get_num_partitions(); num_fine_neighbors = A.manager->neighbors.size(); my_id = A.manager->global_id(); } else { num_parts = 1; num_fine_neighbors = 0; my_id = 0; } if (!A.is_matrix_singleGPU() && this->isConsolidationLevel()) { // 
---------------------------------------------------- // Consolidate multiple fine matrices into one coarse matrix // ---------------------------------------------------- // ---------------- // Step 1 // Decide which partitions should be merged together, store in destination_partitions vector // --------------- IVector_h &destination_part = A.manager->getDestinationPartitions(); int my_destination_part = A.manager->getMyDestinationPartition(); if (my_destination_part >= num_parts) { FatalError("During consolidation, sending data to partition that doesn't exist", AMGX_ERR_NOT_IMPLEMENTED); } // Create mapping from coarse partition indices (ranks on the coarse consolidated level) to partition indices on the fine level (ranks on the fine level) IVector_h coarse_part_to_fine_part = destination_part; thrust::sort(coarse_part_to_fine_part.begin(), coarse_part_to_fine_part.end()); cudaCheckError(); coarse_part_to_fine_part.erase(thrust::unique(coarse_part_to_fine_part.begin(), coarse_part_to_fine_part.end()), coarse_part_to_fine_part.end()); cudaCheckError(); //Then, the number of coarse partitions is simply the size of this vector int num_coarse_partitions = coarse_part_to_fine_part.size(); // Create mapping from fine partition indices to coarse partition indices, with fine partitions that are merging together having the same coarse indices IVector_h fine_part_to_coarse_part(num_parts); thrust::lower_bound(coarse_part_to_fine_part.begin(), coarse_part_to_fine_part.end(), destination_part.begin(), destination_part.end(), fine_part_to_coarse_part.begin()); cudaCheckError(); // Create mapping from this specific partition's neighbors to consolidated coarse neighbors, but using their fine index (aka. destination partition indices for my neighbors) IVector_h fine_neigh_to_fine_part; A.manager->createNeighToDestPartMap(fine_neigh_to_fine_part, A.manager->neighbors, destination_part, num_fine_neighbors); // Create mapping from consolidated coarse neighbors to fine partition indices (even if the current partition is not going to be a root) IVector_h coarse_neigh_to_fine_part; int num_coarse_neighbors; A.manager->createConsolidatedNeighToPartMap(coarse_neigh_to_fine_part, fine_neigh_to_fine_part, my_destination_part, destination_part, num_coarse_neighbors); // Create mapping from fine neighbors to coarse neighbors, with fine neighbors this partition is merging with labeled with -1 IVector_h fine_neigh_to_coarse_neigh; A.manager->createNeighToConsNeigh(fine_neigh_to_coarse_neigh, coarse_neigh_to_fine_part, fine_neigh_to_fine_part, my_destination_part, num_fine_neighbors); /* EXAMPLE Take the following partition graph (that describes connections between partitions, vertices are the partitions themselves), this is the same graph that is used in the setup example number of partitions num_parts=12 CSR row_offsets [0 4 8 13 21 25 32 36 41 46 50 57 61] CSR col_indices [0 1 3 8 0 1 2 3 1 2 3 4 5 0 1 2 3 4 5 8 10 2 4 5 6 2 3 4 5 6 7 10 4 5 6 7 5 6 7 9 10 0 3 8 10 11 7 9 10 11 3 5 7 8 9 10 11 8 9 10 11] destination_part = [0 0 0 0 4 4 4 4 8 8 8 8] coarse_part_to_fine_part = [0 4 8] num_coarse_partitions = 3 fine_part_to_coarse_part = [0 0 0 0 1 1 1 1 2 2 2 2] original neighbor lists correspond to the rows of the matrix, minus the diagonal elements: (part 0)[1 3 8] (part 3)[0 1 2 4 5 8 10] (part 10)[3 5 7 8 9 11] fine_neigh_to_fine_part (part 0)[0 0 2] (part 3)[0 0 0 0 1 2 2] (part 10)[0 1 1 2 2 2] coarse_neigh_to_fine_part (part 0)[8] (part 3)[4 8] (part 10)[0 4] fine_neigh_to_coarse_neigh (part 0)[-1 -1 0] (part 
3)[-1 -1 -1 0 0 1 1] (part 10)[0 1 1 -1 -1 -1] */ // -------------------------- // Step 2 // Create coarse B2L_maps, by mapping fine B2L maps to coarse indices using this->m_aggregates and eliminating duplicates // -------------------------- std::vector<IVector> coarse_B2L_maps(num_fine_neighbors); m_num_all_aggregates = m_num_aggregates; int num_neighbors_temp = A.manager->neighbors.size(); int num_rings = A.manager->B2L_rings[0].size() - 1; if (num_rings != 1) { FatalError("num_rings > 1 not supported in consolidation\n", AMGX_ERR_NOT_IMPLEMENTED); } IndexType max_b2l = 0; for (int i = 0; i < num_neighbors_temp; i++ ) { max_b2l = max_b2l > A.manager->B2L_rings[i][1] ? max_b2l : A.manager->B2L_rings[i][1]; } IVector B2L_aggregates(max_b2l); IVector indices(max_b2l); //TODO: use the algorithm from setNeighborAggregates() for (int i = 0; i < num_neighbors_temp; i++ ) { int size = A.manager->B2L_rings[i][1]; thrust::fill(B2L_aggregates.begin(), B2L_aggregates.begin() + size, 0); thrust::sequence(indices.begin(), indices.begin() + size); //substitute coarse aggregate indices for fine boundary nodes thrust::copy(thrust::make_permutation_iterator(this->m_aggregates.begin(), A.manager->B2L_maps[i].begin()), thrust::make_permutation_iterator(this->m_aggregates.begin(), A.manager->B2L_maps[i].begin() + size), B2L_aggregates.begin()); //find the unique ones thrust::sort_by_key(B2L_aggregates.begin(), B2L_aggregates.begin() + size, indices.begin()); IndexType num_unique = thrust::unique_by_key(B2L_aggregates.begin(), B2L_aggregates.begin() + size, indices.begin()).first - B2L_aggregates.begin(); coarse_B2L_maps[i].resize(num_unique); //sort it back so we have the original ordering thrust::sort_by_key(indices.begin(), indices.begin() + num_unique, B2L_aggregates.begin()); thrust::copy(B2L_aggregates.begin(), B2L_aggregates.begin() + num_unique, coarse_B2L_maps[i].begin()); } cudaCheckError(); /* * EXAMPLE say, partition 3 has the following coarse B2L_maps: neighbors [0 1 2 4 5 8 10] B2L_maps[0(=0)] = [6 7 8] B2L_maps[1(=1)] = [8 9 10] B2L_maps[2(=2)] = [10 11 12 13] B2L_maps[3(=4)] = [13 14 15] B2L_maps[4(=5)] = [15 16 17] B2L_maps[5(=8)] = [6 18 19] B2L_maps[6(=10)] = [17 20 19] */ // --------------------------------------------------- // Step 3 // create new B2L maps for each merged destination neighbor and drop B2L maps to neighbors we are merging with // --------------------------------------------------- std::vector<IVector> dest_coarse_B2L_maps; A.manager->consolidateB2Lmaps(dest_coarse_B2L_maps, coarse_B2L_maps, fine_neigh_to_coarse_neigh, num_coarse_neighbors, num_fine_neighbors); /* * EXAMPLE Then, merging the coarse B2L maps on partition 3, we get: coarse_neigh_to_fine_part [4 8] dest_coarse_B2L_maps[0(=4)] = [13 14 15 16 17] dest_coarse_B2L_maps[1(=8)] = [6 17 18 19 20] */ // ----------------------- // Step 4 // Create interior-boundary renumbering of aggregates according to dest_coarse_B2L_maps // ----------------------- // Now renumber the aggregates with all interior aggregates first, boundary aggregates second int num_interior_aggregates; //returned by createAggregatesRenumbering int num_boundary_aggregates; //returned by createAggregatesRenumbering IVector renumbering; //returned by createAggregatesRenumbering // Following calls create renumbering array and modifies B2L_maps A.manager->createAggregatesRenumbering(renumbering, dest_coarse_B2L_maps, this->m_num_aggregates, num_coarse_neighbors, num_interior_aggregates, num_boundary_aggregates, num_rings); /* * EXAMPLE Partition 3 will 
get a renumbering vector of size 21, for the 21 owned agggregates: [0 1 2 3 4 5 17 6 7 8 9 10 11 12 13 14 15 16 18 19 20] num_interior_aggregates = 12 num_boundary_aggregates = 9 */ // ------------------------------------------------- // Step 5 // Determine whether root partition, make list of partitions merged into one // ------------------------------------------------ // Check if I'm root partition and how fine partitions (including myself) are merging into me bool is_root_partition = false; int num_fine_parts_to_consolidate = 0; IVector_h fine_parts_to_consolidate; for (int i = 0; i < num_parts; i++) { if (destination_part[i] == my_id) { is_root_partition = true; num_fine_parts_to_consolidate++; } } fine_parts_to_consolidate.resize(num_fine_parts_to_consolidate); int count = 0; for (int i = 0; i < num_parts; i++) { if (destination_part[i] == my_id) { fine_parts_to_consolidate[count] = i; count++; } } //save this information as state, as this will also be required during solve for restriction/prolongation A.manager->setIsRootPartition(is_root_partition); A.manager->setNumPartsToConsolidate(num_fine_parts_to_consolidate); A.manager->setPartsToConsolidate(fine_parts_to_consolidate); /* * EXAMPLE isRootPartition is true for partitions 0,4,8 false for others num_fine_parts_to_consolidate = 4 for partitions 0,4,8 fine_parts_to_consolidate (part 0)[0 1 2 3] (part 4)[4 5 6 7] (part 8)[8 9 10 11] */ // ---------------------- // Step 6 // Compute number of interior, boundary and total nodes in the consolidated coarse matrix. Create offsets so that partitions being merged together will have their aggregate indices ordered like this: // [num_interior(fine_parts_to_consolidate[0]] num_interior(fine_parts_to_consolidate[1]] ... num_interior(fine_parts_to_consolidate[num_fine_parts_to_consolidate] // num_boundary(fine_parts_to_consolidate[0]] num_boundary(fine_parts_to_consolidate[1]] ... 
num_boundary(fine_parts_to_consolidate[num_fine_parts_to_consolidate] ] // ---------------------- // Gather to get number of interior/boundary aggregates of neighbors I will merge with std::vector<IVector_h> vertex_counts; int interior_offset, boundary_offset, total_interior_rows_in_merged, total_boundary_rows_in_merged; int total_rows_in_merged; //Computes these offsets on the root, sends them back A.manager->computeConsolidatedOffsets(my_id, my_destination_part, is_root_partition, num_interior_aggregates, num_boundary_aggregates, vertex_counts, fine_parts_to_consolidate, num_fine_parts_to_consolidate, interior_offset, boundary_offset, total_interior_rows_in_merged, total_boundary_rows_in_merged, total_rows_in_merged, A.manager->getComms()); //Partitions save these offsets, as it will be required during solve restriction/prolongation A.manager->setConsolidationOffsets(interior_offset, num_interior_aggregates, boundary_offset + num_interior_aggregates, num_boundary_aggregates); /* * EXAMPLE For root partition 0, say we have the following interior/boundary counts (note that partition 1 has 0 boundary, as it is only connected to partitions it is merging with) part 0 - interior: 10 boundary 3 part 1 - interior: 18 part 2 - interior: 10 boundary 16 part 3 - interior: 12 boundary 9 interior_offset for partitions 0,1,2,3: 0 10 28 38 (total_interior_rows_in_merged 50) boundary_offset for partitions 0,1,2,3: 0 3 3 19 (total_boundary_rows_in_merged 28) */ // ---------------------- // Step 7 // Each partition renumbers its aggregates and dest_coarse_B2L_maps using offsets computed in Step 6 and permutation in Step 4 // ---------------------- // Kernel to renumber the aggregates int block_size = 128; int grid_size = ::min( 4096, ( A.manager->halo_offsets[0] + block_size - 1 ) / block_size); hipLaunchKernelGGL(( renumberAggregatesKernel) , dim3(grid_size), dim3(block_size) , 0, 0, renumbering.raw(), interior_offset, boundary_offset, this->m_aggregates.raw(), A.manager->halo_offsets[0], num_interior_aggregates, renumbering.size()); cudaCheckError(); for (int i = 0; i < num_coarse_neighbors; i++) { thrust::transform(dest_coarse_B2L_maps[i].begin(), dest_coarse_B2L_maps[i].end(), thrust::constant_iterator<IndexType>(boundary_offset), dest_coarse_B2L_maps[i].begin(), thrust::plus<IndexType>()); } cudaCheckError(); /* * EXAMPLE Partition 3 had a renumbering vector: [0 1 2 3 4 5 17 6 7 8 9 10 11 12 13 14 15 16 18 19 20] which is now adjusted to account for the consolidated coarse matrices' indices: [38 39 40 41 42 43 74 44 45 46 47 48 49 69 70 71 72 73 75 76 77] And the dest_coarse_B2L_maps, which looked like: dest_coarse_B2L_maps[0(=4)] = [13 14 15 16 17] dest_coarse_B2L_maps[1(=8)] = [6 17 18 19 20] is now: dest_coarse_B2L_maps[0(=4)] = [69 70 71 72 73] dest_coarse_B2L_maps[1(=8)] = [74 73 75 76 77] */ // ------------------------------------------------- // Step 8 // Send dest_coarse_B2L_maps to root partitions // ------------------------------------------------ // Each fine partition sends to its root the number of coarse neighbors it has, their ids, and the number of boundary nodes for each coarse neighbor IVector_h num_bdy_per_coarse_neigh(num_coarse_neighbors); for (int i = 0; i < num_coarse_neighbors; i++) { num_bdy_per_coarse_neigh[i] = dest_coarse_B2L_maps[i].size(); } IVector_h consolidated_coarse_neigh_to_fine_part; //consolidated list of coarse neighbors for the root partition, using fine partition indices int num_consolidated_neighbors = 0; std::vector<IVector> consolidated_B2L_maps; 
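    /* Illustrative sketch (hypothetical data, independent of the running EXAMPLE above):
       if two partitions that merge into the same root both hold a dest_coarse_B2L_map towards the same
       destination coarse neighbor, e.g.
           map sent by the first partition  : [10 11 12]
           map sent by the second partition : [40 41]
       then the root is expected to keep one concatenated map [10 11 12 40 41] for that neighbor;
       the per-sender sizes are communicated through num_bdy_per_coarse_neigh. */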
//concatenates dest_coarse_B2L_maps received from partitions that are merging into the same root and pointing to the same destination coarse neighbor A.manager->consolidateB2LmapsOnRoot(num_consolidated_neighbors, consolidated_B2L_maps, consolidated_coarse_neigh_to_fine_part, dest_coarse_B2L_maps, coarse_neigh_to_fine_part, num_bdy_per_coarse_neigh, fine_parts_to_consolidate, num_fine_parts_to_consolidate, my_id, my_destination_part, is_root_partition, num_coarse_neighbors, A.manager->getComms()); // // Step 9 - figuring out halo aggregate IDs // //Now we need to update halo aggregate IDs - this is just a halo exchange on this->m_aggregates between partitions //that are being merged together, but we need to send other halos to the root to come up with the halo renumbering //TODO: separate transactions, send "real halo" to the root nodes (coarse neighbors) immediately //Step 9.1: takes care of synchronizing the aggregate IDs between partitions we are merging together and got consistent halo aggregate IDs for neighbor we are not merging with (which are going to be sent to the root in 9.2) A.manager->exchange_halo(this->m_aggregates, 6666); /* * EXAMPLE 2 This example is independent from the previous ones. Say partition 0 and 1 are merging (into 0) partition 0 is neighbors with 1,2,3 and partition 1 is neighbors with 0,3,4 Partitions 3 and 4 are merging (into partition 3) and partition 2 is not merging with anyone. This example details the renumbering of halo indices on partition 0 and partition 1. After the exchange halo, we have: this->m_aggregates on partition 0: [(fine interior nodes) (fine boundary nodes) (fine halo from part 1) (fine halo from part 2) (fine halo from part 3)] [(fine interior nodes) (fine boundary nodes) (13 13 15) (12 15 17) (14 16 18)] aggregates on partition 1: [(fine interior nodes) (fine boundary nodes) (fine halo from part 0) (fine halo from part 3) (fine halo from part 4)] [(fine interior nodes) (fine boundary nodes) (14 16 17) (18 19 19) (15 15 17)] indices in (fine halo from part 0) and (fine halo from part 1) actually contain interior aggregate indices (if they are not connected to partitions 2,3 or 4), because the boundary is disappearing there. Indices in halo regions contain remote-local indices. 
This example is used throughout consolidateAndRenumberHalos */ //Step 9.2 - 9.5 IVector_h halo_offsets(num_consolidated_neighbors + 1, 0); A.manager->consolidateAndRenumberHalos(this->m_aggregates, A.manager->halo_offsets, halo_offsets, A.manager->neighbors, num_fine_neighbors, consolidated_coarse_neigh_to_fine_part, num_consolidated_neighbors, destination_part, my_destination_part, is_root_partition, fine_parts_to_consolidate, num_fine_parts_to_consolidate, num_parts, my_id, total_rows_in_merged, this->m_num_all_aggregates, A.manager->getComms()); if (is_root_partition) { for (int i = 0; i < consolidated_B2L_maps.size(); i++) { thrust::sort(consolidated_B2L_maps[i].begin(), consolidated_B2L_maps[i].end()); } cudaCheckError(); } // Step 10 do the Galerkin product // ViewType oldView = this->getA().currentView(); this->getA().setView(ALL); // If we reuse the level we keep the previous restriction operator this->Profile.tic("computeR"); computeRestrictionOperator(); this->Profile.toc("computeR"); profileSubphaseComputeCoarseA(); this->Profile.tic("computeA"); Ac.copyAuxData(&A); if (Ac.manager == NULL) { Ac.manager = new DistributedManager<TConfig>(); } this->m_coarseAGenerator->computeAOperator(A, Ac, this->m_aggregates, this->m_R_row_offsets, this->m_R_column_indices, this->m_num_all_aggregates); Ac.setColsReorderedByColor(false); ViewType oldViewC = Ac.currentView(); Ac.setView(FULL); this->Profile.toc("computeA"); // // Step 11, send matrices to root, consolidate // if (!is_root_partition) { A.manager->getComms()->send_vector_async(Ac.row_offsets, my_destination_part, 1111); A.manager->getComms()->send_vector_async(Ac.col_indices, my_destination_part, 1112); A.manager->getComms()->send_vector_async(Ac.values, my_destination_part, 1113); } else { int total_num_rows = this->m_num_all_aggregates; IVector new_row_offsets(total_num_rows + 1, 0); //if diags are inside then we won't be counting those twice when computing halo row length if (!Ac.hasProps(DIAG)) { thrust::fill(new_row_offsets.begin() + halo_offsets[0], new_row_offsets.begin() + halo_offsets[num_consolidated_neighbors], 1); cudaCheckError(); } std::vector<IVector> recv_row_offsets(num_fine_parts_to_consolidate); std::vector<VecInt_t> num_nz(num_fine_parts_to_consolidate); IVector *work_row_offsets; std::vector<VecInt_t> index_offset_array(2 * num_fine_parts_to_consolidate + 1); int interior_offset = 0; int boundary_offset = 0; for (int i = 0; i < num_fine_parts_to_consolidate; i++) { boundary_offset += vertex_counts[i][0]; } int max_num_nz = 0; for (int i = 0; i < num_fine_parts_to_consolidate; i++) { int current_part = fine_parts_to_consolidate[i]; //receive row offsets if (current_part != my_id) { recv_row_offsets[i].resize(total_num_rows + 1); A.manager->getComms()->recv_vector(recv_row_offsets[i], current_part, 1111); work_row_offsets = &(recv_row_offsets[i]); num_nz[i] = (*work_row_offsets)[work_row_offsets->size() - 1]; max_num_nz = max_num_nz > num_nz[i] ? 
max_num_nz : num_nz[i]; } else { work_row_offsets = &(Ac.row_offsets); num_nz[i] = Ac.get_num_nz(); } //Get interior row length thrust::transform(work_row_offsets->begin() + interior_offset + 1, work_row_offsets->begin() + interior_offset + vertex_counts[i][0] + 1, work_row_offsets->begin() + interior_offset, new_row_offsets.begin() + interior_offset, thrust::minus<IndexType>()); cudaCheckError(); //Get boundary row length thrust::transform(work_row_offsets->begin() + boundary_offset + 1, work_row_offsets->begin() + boundary_offset + vertex_counts[i][1] + 1, work_row_offsets->begin() + boundary_offset, new_row_offsets.begin() + boundary_offset, thrust::minus<IndexType>()); cudaCheckError(); //Increment halo row length by one for every nonzero that is an edge from the halo into this partition int size = halo_offsets[num_consolidated_neighbors] - halo_offsets[0]; const int block_size = 128; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / block_size + 1); hipLaunchKernelGGL(( set_halo_rowlen) , dim3(num_blocks), dim3(block_size), 0, 0, work_row_offsets->raw() + halo_offsets[0], new_row_offsets.raw() + halo_offsets[0], size, Ac.hasProps(DIAG)); cudaCheckError(); index_offset_array[i] = interior_offset; index_offset_array[num_fine_parts_to_consolidate + i] = boundary_offset; interior_offset += vertex_counts[i][0]; boundary_offset += vertex_counts[i][1]; index_offset_array[i + 1] = interior_offset; index_offset_array[num_fine_parts_to_consolidate + i + 1] = boundary_offset; } A.manager->setConsolidationArrayOffsets(index_offset_array); //Exclusive scan row length array to get row offsets thrust::exclusive_scan(new_row_offsets.begin(), new_row_offsets.end(), new_row_offsets.begin()); cudaCheckError(); //Prepare to receive column indices and values int num_nz_consolidated = new_row_offsets[new_row_offsets.size() - 1]; IVector recv_col_indices(max_num_nz); IVector new_col_indices(num_nz_consolidated); MVector recv_values((max_num_nz + 1 + Ac.hasProps(DIAG) * (halo_offsets[num_consolidated_neighbors] - 1))*Ac.get_block_size()); MVector new_values((num_nz_consolidated + 1 + Ac.hasProps(DIAG) * (halo_offsets[num_consolidated_neighbors] - 1))*Ac.get_block_size()); thrust::fill(new_col_indices.begin() + new_row_offsets[halo_offsets[0]], new_col_indices.end(), -1); //Set all the halo col indices to -1 if (!Ac.hasProps(DIAG)) { thrust::fill(new_values.begin() + num_nz_consolidated * Ac.get_block_size(), new_values.end(), types::util<ValueTypeA>::get_zero()); } cudaCheckError(); IVector *work_col_indices; MVector *work_values; interior_offset = 0; boundary_offset = 0; for (int i = 0; i < num_fine_parts_to_consolidate; i++) { int current_part = fine_parts_to_consolidate[i]; boundary_offset += vertex_counts[i][0]; } for (int i = 0; i < num_fine_parts_to_consolidate; i++) { int current_part = fine_parts_to_consolidate[i]; if (current_part != my_id) { A.manager->getComms()->recv_vector(recv_col_indices, current_part, 1112, 0, num_nz[i]); A.manager->getComms()->recv_vector(recv_values, current_part, 1113, 0, (num_nz[i] + 1 + Ac.hasProps(DIAG) * (halo_offsets[num_consolidated_neighbors] - 1))*Ac.get_block_size()); work_col_indices = &(recv_col_indices); work_row_offsets = &(recv_row_offsets[i]); work_values = &(recv_values); } else { work_row_offsets = &(Ac.row_offsets); work_col_indices = &(Ac.col_indices); work_values = &(Ac.values); } //Put interior rows in place thrust::copy(work_col_indices->begin() + (*work_row_offsets)[interior_offset], work_col_indices->begin() + 
(*work_row_offsets)[interior_offset + vertex_counts[i][0]], new_col_indices.begin() + new_row_offsets[interior_offset]); cudaCheckError(); thrust::copy(work_values->begin() + (*work_row_offsets)[interior_offset]*Ac.get_block_size(), work_values->begin() + ((*work_row_offsets)[interior_offset + vertex_counts[i][0]])*Ac.get_block_size(), new_values.begin() + new_row_offsets[interior_offset]*Ac.get_block_size()); cudaCheckError(); //Put boundary rows in place thrust::copy(work_col_indices->begin() + (*work_row_offsets)[boundary_offset], work_col_indices->begin() + (*work_row_offsets)[boundary_offset + vertex_counts[i][1]], new_col_indices.begin() + new_row_offsets[boundary_offset]); cudaCheckError(); thrust::copy(work_values->begin() + (*work_row_offsets)[boundary_offset]*Ac.get_block_size(), work_values->begin() + ((*work_row_offsets)[boundary_offset + vertex_counts[i][1]])*Ac.get_block_size(), new_values.begin() + new_row_offsets[boundary_offset]*Ac.get_block_size()); cudaCheckError(); //Process halo rows (merge) int size = halo_offsets[num_consolidated_neighbors] - halo_offsets[0]; const int block_size = 128; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / block_size + 1); //TODO: vectorise this kernel, will be inefficient for larger block sizes hipLaunchKernelGGL(( append_halo_nz) , dim3(num_blocks), dim3(block_size), 0, 0, work_row_offsets->raw() + halo_offsets[0], new_row_offsets.raw() + halo_offsets[0], work_col_indices->raw(), new_col_indices.raw(), work_values->raw(), new_values.raw(), size, Ac.hasProps(DIAG), halo_offsets[0], Ac.get_block_size()); cudaCheckError(); // Diagonals if (Ac.hasProps(DIAG)) { // Diagonal corresponding to interior rows thrust::copy(work_values->begin() + (num_nz[i] + interior_offset)*Ac.get_block_size(), work_values->begin() + (num_nz[i] + interior_offset + vertex_counts[i][0])*Ac.get_block_size(), new_values.begin() + (new_row_offsets[halo_offsets[halo_offsets.size() - 1]] + interior_offset)*Ac.get_block_size()); // Diagonal corresponding to boundary rows thrust::copy(work_values->begin() + (num_nz[i] + boundary_offset)*Ac.get_block_size(), work_values->begin() + (num_nz[i] + boundary_offset + vertex_counts[i][1])*Ac.get_block_size(), new_values.begin() + (new_row_offsets[halo_offsets[halo_offsets.size() - 1]] + boundary_offset)*Ac.get_block_size()); cudaCheckError(); } interior_offset += vertex_counts[i][0]; boundary_offset += vertex_counts[i][1]; } Ac.set_initialized(0); Ac.row_offsets = new_row_offsets; Ac.col_indices = new_col_indices; Ac.values = new_values; } // Create a new distributed communicator for coarse levels that only contains active partitions Ac.manager->setComms(A.manager->getComms()->Clone()); Ac.manager->getComms()->createSubComm(coarse_part_to_fine_part, is_root_partition); // // Step 12 - finalizing, bookkeping // if (is_root_partition) { int my_consolidated_id = fine_part_to_coarse_part[my_id]; for (int i = 0; i < num_consolidated_neighbors; i++) { consolidated_coarse_neigh_to_fine_part[i] = fine_part_to_coarse_part[consolidated_coarse_neigh_to_fine_part[i]]; } Ac.manager->initializeAfterConsolidation( my_consolidated_id, Ac, consolidated_coarse_neigh_to_fine_part, total_interior_rows_in_merged, total_boundary_rows_in_merged, this->m_num_all_aggregates, halo_offsets, consolidated_B2L_maps, 1, true); Ac.manager->B2L_rings.resize(num_consolidated_neighbors + 1); for (int i = 0; i < num_consolidated_neighbors; i++) { Ac.manager->B2L_rings[i].resize(2); Ac.manager->B2L_rings[i][0] = 0; Ac.manager->B2L_rings[i][1] = 
consolidated_B2L_maps[i].size(); } Ac.manager->set_initialized(Ac.row_offsets); Ac.manager->getComms()->set_neighbors(num_consolidated_neighbors); int new_nnz = Ac.row_offsets[Ac.row_offsets.size() - 1]; Ac.set_num_nz(new_nnz); Ac.set_num_cols(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1]); Ac.set_num_rows(Ac.get_num_cols()); if (A.hasProps(DIAG)) { Ac.addProps(DIAG); } Ac.computeDiagonal(); Ac.set_initialized(1); } else { Ac.set_initialized(0); // set size of Ac to be zero Ac.resize(0, 0, 0, 1); Ac.set_initialized(1); } this->getA().setView(oldView); Ac.setView(OWNED); } else { /* WARNING: do not recompute prolongation (P) and restriction (R) when you are reusing the level structure (structure_reuse_levels > 0). Notice that in aggregation path, prolongation P is implicit, and is used through the aggregates array. */ if (this->isReuseLevel() == false) { this->setNeighborAggregates(); } this->getA().setView(ALL); // Compute restriction operator // TODO: computing the restriction operator could be merged with the selector to save some work // If we reuse the level we keep the previous restriction operator if (this->isReuseLevel() == false) { profileSubphaseComputeRestriction(); this->Profile.tic("computeR"); computeRestrictionOperator(); this->Profile.toc("computeR"); } profileSubphaseComputeCoarseA(); this->Profile.tic("computeA"); Ac.set_initialized(0); Ac.copyAuxData(&A); this->m_coarseAGenerator->computeAOperator(A, Ac, this->m_aggregates, this->m_R_row_offsets, this->m_R_column_indices, this->m_num_all_aggregates); Ac.setColsReorderedByColor(false); Ac.setView(FULL); this->Profile.toc("computeA"); this->prepareNextLevelMatrix(A, Ac); A.setView(OWNED); Ac.setView(OWNED); } this->m_next_level_size = this->m_num_all_aggregates * Ac.get_block_dimy(); if (this->m_print_aggregation_info) { MatrixAnalysis<TConfig> ana(&Ac); ana.aggregatesQuality2(this->m_aggregates, this->m_num_aggregates, A); } } // ------------------------------------------------------------- // Explicit instantiations // ------------------------------------------------------------- #define AMGX_CASE_LINE(CASE) template class Aggregation_AMG_Level<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } }
bc09efc7dc6d94edf5785a80206807fed29bd48c.cu
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <aggregation/aggregation_amg_level.h> #include <profile.h> #include <matrix_analysis.h> #ifdef _WIN32 #pragma warning (push) #pragma warning (disable : 4244 4267 4521) #endif #ifdef _WIN32 #pragma warning (pop) #endif #include <basic_types.h> #include <util.h> #include <fstream> #include <cutil.h> #include <multiply.h> #include <transpose.h> #include <blas.h> #include <string> #include <string.h> #include <iostream> #include <algorithm> #include <amgx_timer.h> #include <amgx_types/util.h> #include <thrust/sort.h> #include <thrust/remove.h> #include <thrust/transform.h> #include <thrust/binary_search.h> #include <thrust/unique.h> #include <thrust/inner_product.h> namespace amgx { namespace aggregation { // ---------------------- // Kernels // ---------------------- template <typename IndexType, typename ValueType> __global__ void set_to_one_kernel(IndexType start, IndexType end, IndexType *ind, ValueType *v) { for (int tid = start + blockDim.x * blockIdx.x + threadIdx.x; tid < end; tid += gridDim.x * blockDim.x) { v[ind[tid]] = types::util<ValueType>::get_one(); } } template <typename IndexType> __global__ void renumberAggregatesKernel(const IndexType *renumbering, const int interior_offset, const int bdy_offset, IndexType *aggregates, const int num_aggregates, const int n_interior, const int renumbering_size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; while (tid < num_aggregates) { IndexType new_agg_id; if (renumbering_size == 0) { new_agg_id = aggregates[tid]; } else { new_agg_id = renumbering[aggregates[tid]]; } //if (aggregates[tid] > num_aggregates) //{ //printf("ID %d old %d + %d = %d\n", tid, new_agg_id, ((new_agg_id >= n_interior) ? bdy_offset : interior_offset), new_agg_id + ((new_agg_id >= n_interior) ? bdy_offset : interior_offset)); //} new_agg_id += ((new_agg_id >= n_interior) ? 
bdy_offset : interior_offset); aggregates[tid] = new_agg_id; tid += gridDim.x * blockDim.x; } } // Kernel to restrict residual using csr_format template <typename IndexType, typename ValueType> __global__ void restrictResidualKernel(const IndexType *row_offsets, const IndexType *column_indices, const ValueType *r, ValueType *rr, const int num_aggregates) { int jmin, jmax; for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_aggregates; tid += gridDim.x * blockDim.x) { ValueType temp(types::util<ValueType>::get_zero()); jmin = row_offsets[tid]; jmax = row_offsets[tid + 1]; for (int j = jmin; j < jmax; j++) { int j_col = column_indices[j]; temp = temp + r[j_col]; } rr[tid] = temp; } } // Kernel to restrict residual using block_dia_csr_format template <typename IndexType, typename ValueType, int bsize> __global__ void restrictResidualBlockDiaCsrKernel(const IndexType *row_offsets, const IndexType *column_indices, const ValueType *r, ValueType *rr, const int num_aggregates) { ValueType rr_temp[bsize]; int offset, jmin, jmax; for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_aggregates; tid += gridDim.x * blockDim.x) { // Initialize to zero #pragma unroll for (int m = 0; m < bsize; m++) { rr_temp[m] = types::util<ValueType>::get_zero(); } jmin = row_offsets[tid]; jmax = row_offsets[tid + 1]; for (int j = jmin; j < jmax; j++) { int jcol = column_indices[j]; offset = jcol * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { rr_temp[m] = rr_temp[m] + r[offset + m]; } } offset = tid * bsize; #pragma unroll for (int m = 0; m < bsize; m++) { rr[offset + m] = rr_temp[m]; }; } } // Kernel to prolongate and apply the correction for csr format template <typename IndexType, typename ValueType> __global__ void prolongateAndApplyCorrectionKernel(const ValueType alpha, const int num_rows, ValueType *x, const ValueType *e, const IndexType *aggregates, IndexType num_aggregates) { for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_rows; tid += gridDim.x * blockDim.x) { IndexType I = aggregates[tid]; x[tid] = x[tid] + alpha * e[I]; } } // Kernel to prolongate and apply the correction for block-dia-csr format template <typename IndexType, typename ValueType> __global__ void prolongateAndApplyCorrectionBlockDiaCsrKernel(const ValueType alpha, const int num_block_rows, ValueType *x, const ValueType *e, const IndexType *aggregates, IndexType num_aggregates, const int bsize) { for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_block_rows; tid += gridDim.x * blockDim.x) { IndexType I = aggregates[tid]; for (int m = 0; m < bsize; m++) { x[tid * bsize + m] = x[tid * bsize + m] + alpha * e[I * bsize + m]; } } } template <typename IndexType, typename ValueType> __global__ void prolongateVector(const IndexType *aggregates, const ValueType *in, ValueType *out, IndexType fine_rows, IndexType coarse_rows, int blocksize) { int tid = threadIdx.x + blockDim.x * blockIdx.x; while ( tid < fine_rows * blocksize ) { int i = tid / blocksize; int e = tid % blocksize; IndexType I = aggregates[i]; out[tid] = in[ I * blocksize + e ]; tid += gridDim.x * blockDim.x; } } template <typename IndexType, typename ValueType> __global__ void applyCorrection(ValueType lambda, const ValueType *e, ValueType *x, IndexType numRows ) { int tid = threadIdx.x + blockDim.x * blockIdx.x; while ( tid < numRows ) { x[tid] = x[tid] + lambda * e[tid]; tid += gridDim.x * blockDim.x; } } // ------------------------------- // Methods // ------------------------------ // Constructor template <class T_Config> 
Aggregation_AMG_Level_Base<T_Config>::Aggregation_AMG_Level_Base(AMG_Class *amg, ThreadManager *tmng) : AMG_Level<T_Config>(amg, tmng) { m_selector = SelectorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); m_coarseAGenerator = CoarseAGeneratorFactory<T_Config>::allocate(*(amg->m_cfg), amg->m_cfg_scope); m_matrix_halo_exchange = amg->m_cfg->AMG_Config::getParameter<int>("matrix_halo_exchange", amg->m_cfg_scope); m_print_aggregation_info = amg->m_cfg->AMG_Config::getParameter<int>("print_aggregation_info", amg->m_cfg_scope) != 0; m_error_scaling = amg->m_cfg->AMG_Config::getParameter<int>("error_scaling", amg->m_cfg_scope ); reuse_scale = amg->m_cfg->AMG_Config::getParameter<int>("reuse_scale", amg->m_cfg_scope ); scaling_smoother_steps = amg->m_cfg->AMG_Config::getParameter<int>("scaling_smoother_steps", amg->m_cfg_scope ); scale_counter = 0; } // Destructor template <class T_Config> Aggregation_AMG_Level_Base<T_Config>::~Aggregation_AMG_Level_Base() { delete m_selector; delete m_coarseAGenerator; } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::transfer_level(AMG_Level<TConfig1> *ref_lvl) { Aggregation_AMG_Level_Base<TConfig1> *ref_agg_lvl = dynamic_cast<Aggregation_AMG_Level_Base<TConfig1>*>(ref_lvl); this->scale_counter = ref_agg_lvl->scale_counter; this->scale = ref_agg_lvl->scale; this->m_R_row_offsets.copy(ref_agg_lvl->m_R_row_offsets); this->m_R_column_indices.copy(ref_agg_lvl->m_R_column_indices); this->m_aggregates.copy(ref_agg_lvl->m_aggregates); this->m_aggregates_fine_idx.copy(ref_agg_lvl->m_aggregates_fine_idx); this->m_num_aggregates = ref_agg_lvl->m_num_aggregates; this->m_num_all_aggregates = ref_agg_lvl->m_num_all_aggregates; } typedef std::pair<int, int> mypair; bool comparator ( const mypair &l, const mypair &r) { return l.first < r.first; } // Method to compute R // General path // TODO: this could be merged with selector to save some computations template <typename T_Config> void Aggregation_AMG_Level_Base<T_Config>::computeRestrictionOperator_common() { m_R_row_offsets.resize(m_num_all_aggregates + 1); //create one more row for the pseudo aggregate IVector R_row_indices(m_aggregates); #if AMGX_ASYNCCPU_PROOF_OF_CONCEPT bool use_cpu = m_aggregates.size() < 4096; if (use_cpu) { struct computeRestrictionTask : public task { Aggregation_AMG_Level_Base<T_Config> *self; IVector *R_row_indices; void run() { int N = self->m_aggregates.size(); IVector_h R_row_indices_host(self->m_aggregates); std::vector<mypair> pairs(N); for (int i = 0; i < N; i++) { pairs[i].first = R_row_indices_host[i]; pairs[i].second = i; } std::stable_sort(pairs.begin(), pairs.end(), comparator); IVector_h R_column_indices(self->A->get_num_rows()); for (int i = 0; i < N; i++) { R_column_indices[i] = pairs[i].second; R_row_indices_host[i] = pairs[i].first; } self->m_R_column_indices = R_column_indices; *R_row_indices = R_row_indices_host; } }; computeRestrictionTask *t = new computeRestrictionTask(); t->self = this; t->R_row_indices = &R_row_indices; t->run(); delete t; } else #endif { m_R_column_indices.resize(this->A->get_num_rows()); thrust::sequence(m_R_column_indices.begin(), m_R_column_indices.end()); cudaCheckError(); thrust::sort_by_key(R_row_indices.begin(), R_row_indices.end(), m_R_column_indices.begin()); cudaCheckError(); } thrust::lower_bound(R_row_indices.begin(), R_row_indices.end(), thrust::counting_iterator<typename IVector::value_type>(0), thrust::counting_iterator<typename IVector::value_type>(m_R_row_offsets.size()), m_R_row_offsets.begin()); 
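    /* Reading aid (hypothetical data): the sort/lower_bound steps above build R in CSR form, where row I of R
       lists the fine nodes assigned to aggregate I. For example, with
           m_aggregates         = [1 0 1 2 0]        (fine node i -> aggregate m_aggregates[i])
       the sort by aggregate id gives
           R_row_indices        = [0 0 1 1 2]
           m_R_column_indices   = [1 4 0 2 3]        (order within a row may vary; the sort is not stable)
       and lower_bound over 0..3 yields
           m_R_row_offsets      = [0 2 4 5]. */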
cudaCheckError(); } // two methods below could be merged // Method to compute R on HOST using csr format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeRestrictionOperator_1x1() { this->m_R_row_offsets.resize(this->m_num_all_aggregates + 1); this->m_R_column_indices.resize(this->A->get_num_rows()); this->fillRowOffsetsAndColIndices(this->A->get_num_rows()); } // Method to compute R on HOST using block dia-csr format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeRestrictionOperator_4x4() { this->m_R_row_offsets.resize(this->m_num_all_aggregates + 1); this->m_R_column_indices.resize(this->A->get_num_rows()); this->fillRowOffsetsAndColIndices(this->A->get_num_rows()); } // Method to create R_row_offsest and R_column_indices array on HOST using csr or block dia-csr format template <typename T_Config> void Aggregation_AMG_Level_Base<T_Config>::fillRowOffsetsAndColIndices(const int R_num_cols) { for (int i = 0; i < m_num_all_aggregates + 1; i++) { m_R_row_offsets[i] = 0; } // Count number of neighbors for each row for (int i = 0; i < R_num_cols; i++) { int I = m_aggregates[i]; m_R_row_offsets[I]++; } m_R_row_offsets[m_num_all_aggregates] = R_num_cols; for (int i = m_num_all_aggregates - 1; i >= 0; i--) { m_R_row_offsets[i] = m_R_row_offsets[i + 1] - m_R_row_offsets[i]; } /* Set column indices. */ for (int i = 0; i < R_num_cols; i++) { int I = m_aggregates[i]; int Ip = m_R_row_offsets[I]++; m_R_column_indices[Ip] = i; } /* Reset r[i] to start of row memory. */ for (int i = m_num_all_aggregates - 1; i > 0; i--) { m_R_row_offsets[i] = m_R_row_offsets[i - 1]; } m_R_row_offsets[0] = 0; } // Method to compute R on DEVICE using block dia-csr format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeRestrictionOperator_4x4() { this->computeRestrictionOperator_common(); } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeRestrictionOperator_1x1() { this->computeRestrictionOperator_common(); } // Method to restrict Residual on host using csr_matrix format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::restrictResidual_1x1(const VVector &r, VVector &rr) { ValueTypeB temp; for (int i = 0; i < this->m_num_aggregates; i++) { temp = types::util<ValueTypeB>::get_zero(); for (int j = this->m_R_row_offsets[i]; j < this->m_R_row_offsets[i + 1]; j++) { int j_col = this->m_R_column_indices[j]; temp = temp + r[j_col]; } rr[i] = temp; } } // Method to restrict Residual on host using block_dia_csr_matrix format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::restrictResidual_4x4(const VVector &r, VVector &rr) { IndexType bsize = this->A->get_block_dimy(); ValueTypeB *temp = new ValueTypeB[bsize]; for (int i = 0; i < this->m_num_aggregates; i++) { // Initialize temp to 0 for 
(int k = 0; k < bsize; k++) { temp[k] = types::util<ValueTypeB>::get_zero(); } // Add contributions from each fine point for (int j = this->m_R_row_offsets[i]; j < this->m_R_row_offsets[i + 1]; j++) { int j_col = this->m_R_column_indices[j]; for (int k = 0; k < bsize; k++) { temp[k] = temp[k] + r[j_col * bsize + k]; } } // Store result for (int k = 0; k < bsize; k++) { rr[i * bsize + k] = temp[k]; } } } // Method to restrict Residual on device using csr_matrix format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::restrictResidual_1x1(const VVector &r, VVector &rr) { AMGX_CPU_PROFILER("Aggregation_AMG_Level::restrict_residual_1x1 "); int block_size = 64; int max_threads;; if (!this->isConsolidationLevel()) { max_threads = this->m_num_aggregates; } else { max_threads = this->m_num_all_aggregates; } int num_blocks = min( AMGX_GRID_MAX_SIZE, (max_threads - 1) / block_size + 1); const IndexType *R_row_offsets_ptr = this->m_R_row_offsets.raw(); const IndexType *R_column_indices_ptr = this->m_R_column_indices.raw(); const ValueTypeB *r_ptr = r.raw(); ValueTypeB *rr_ptr = rr.raw(); restrictResidualKernel <<< num_blocks, block_size>>>(R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads); cudaCheckError(); } // Method to restrict Residual on device using block_dia_csr_matrix format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::restrictResidual_4x4(const VVector &r, VVector &rr) { AMGX_CPU_PROFILER("Aggregation_AMG_Level::restrict_residual_4x4 "); int block_size = 64; int max_threads; if (!this->isConsolidationLevel()) { max_threads = this->m_num_aggregates; } else { max_threads = this->m_num_all_aggregates; }; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (max_threads + block_size - 1) / block_size); const IndexType *R_row_offsets_ptr = this->m_R_row_offsets.raw(); const IndexType *R_column_indices_ptr = this->m_R_column_indices.raw(); const ValueTypeB *r_ptr = r.raw(); ValueTypeB *rr_ptr = rr.raw(); cudaCheckError(); switch ( this->getA().get_block_dimy() ) { case 2: restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 2> <<< num_blocks, block_size>>>(R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads); break; case 3: restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 3> <<< num_blocks, block_size>>>(R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads); break; case 4: restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 4> <<< num_blocks, block_size>>>(R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads); break; case 5: restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 5> <<< num_blocks, block_size>>>(R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads); break; case 8: restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 8> <<< num_blocks, block_size>>>(R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads); break; case 10: restrictResidualBlockDiaCsrKernel<IndexType, ValueTypeB, 10> <<< num_blocks, block_size>>>(R_row_offsets_ptr, R_column_indices_ptr, r_ptr, rr_ptr, max_threads); break; default: FatalError( "Unsupported block size in restrictResidual_4x4!!!", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE ); } cudaCheckError(); } __inline__ float getAlpha(float &nom, float &denom) { float alpha; if (nom * denom 
<= 0. || std::abs(nom) < std::abs(denom)) { alpha = 1.; } else if (std::abs(nom) > 2.*std::abs(denom)) { alpha = 2.; } else { alpha = nom / denom; } return alpha; } __inline__ double getAlpha(double &nom, double &denom) { double alpha; if (nom * denom <= 0. || std::abs(nom) < std::abs(denom)) { alpha = 1.; } else if (std::abs(nom) > 2.*std::abs(denom)) { alpha = 2.; } else { alpha = nom / denom; } return alpha; } __inline__ cuComplex getAlpha(cuComplex &nom, cuComplex &denom) { cuComplex alpha; if (types::util<cuComplex>::abs(nom) < types::util<cuComplex>::abs(denom)) { alpha = make_cuComplex(1.f, 0.f); } else if (types::util<cuComplex>::abs(nom) > 2.*types::util<cuComplex>::abs(denom)) { alpha = make_cuComplex(2.f, 0.f); } else { alpha = nom / denom; } return alpha; } __inline__ cuDoubleComplex getAlpha(cuDoubleComplex &nom, cuDoubleComplex &denom) { cuDoubleComplex alpha; if (types::util<cuDoubleComplex>::abs(nom) < types::util<cuDoubleComplex>::abs(denom)) { alpha = make_cuDoubleComplex(1., 0.); } else if (types::util<cuDoubleComplex>::abs(nom) > 2.*types::util<cuDoubleComplex>::abs(denom)) { alpha = make_cuDoubleComplex(2., 0.); } else { alpha = nom / denom; } return alpha; } template< class T_Config> typename T_Config::VecPrec Aggregation_AMG_Level_Base<T_Config>::computeAlpha(const Vector<T_Config> &e, const Vector<T_Config> &bc, const Vector<T_Config> &tmp) { typename T_Config::VecPrec alpha = types::util<ValueTypeB>::get_one(); Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); int size = Ac.get_num_rows(); VVector v(2, types::util<ValueTypeB>::get_zero()); v[0] = thrust::inner_product(e.begin(), e.begin() + size, bc.begin(), types::util<ValueTypeB>::get_zero()); v[1] = thrust::inner_product(e.begin(), e.begin() + size, tmp.begin(), types::util<ValueTypeB>::get_zero()); cudaCheckError(); return getAlpha(v[0], v[1]); } // Method to prolongate the error on HOST using csr format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::prolongateAndApplyCorrection_1x1(Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &e, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &bc, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &x, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &tmp) { Matrix<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &A = this->getA(); Matrix<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &C = this->next_h->getA(); if ( this->m_error_scaling >= 2 ) { FatalError("error_scaling=2,3 is not implemented on host", AMGX_ERR_NOT_IMPLEMENTED ); } ValueTypeB alpha = types::util<ValueTypeB>::get_one(); if (this->m_error_scaling) { multiply(this->next_h->getA(), e, tmp); alpha = this->computeAlpha (e, bc, tmp); } // Apply correction on all (interior and exterior) equations. 
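    // Reading aid: in matrix form the loop below computes x <- x + alpha * P * e, where P is the
    // piecewise-constant prolongator implied by m_aggregates (P(i, m_aggregates[i]) = 1), and
    // alpha stays 1 unless error scaling is enabled.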
for (int i = 0; i < A.get_num_cols(); i++) { int I = this->m_aggregates[i]; x[i] = x[i] + alpha * e[I]; } } // Method to prolongate the error on HOST using block_dia_csr format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::prolongateAndApplyCorrection_4x4(Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &e, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &bc, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &x, Vector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &tmp) { if (this->A->get_block_dimy() != this->A->get_block_dimx()) { FatalError("Aggregation_AMG_Level not implemented for non square blocks, exiting", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } if ( this->m_error_scaling >= 2 ) { FatalError("error_scaling=2,3 is not implemented on host", AMGX_ERR_NOT_IMPLEMENTED ); } Matrix<TConfig> &C = this->next_h->getA(); ValueTypeB alpha = types::util<ValueTypeB>::get_one(); if (this->m_error_scaling) { multiply(this->next_h->getA(), e, tmp); alpha = this->computeAlpha (e, bc, tmp); } // Apply correction on all equations. for (int i = 0; i < this->A->get_num_rows(); i++) { int I = this->m_aggregates[i]; for (int k = 0; k < this->A->get_block_dimy(); k++) { x[i * this->A->get_block_dimy() + k] = x[i * this->A->get_block_dimy() + k] + alpha * e[I * this->A->get_block_dimy() + k]; } } } // Prolongate the error on DEVICE using csr format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::prolongateAndApplyCorrection_1x1(Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &e, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &bc, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &x, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &tmp) { AMGX_CPU_PROFILER("Aggregation_AMG_Level::prolongate_and_apply_correction_1x1 "); ValueTypeB alpha = types::util<ValueTypeB>::get_one(); const int block_size = 64; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int) ( (this->A->get_num_rows() + block_size - 1) / block_size ) ); const IndexType *aggregates_ptr = this->m_aggregates.raw(); ValueTypeB *x_ptr = x.raw(); const ValueTypeB *e_ptr = e.raw(); if (this->m_error_scaling) { FatalError("error_scaling=1 is deprecated", AMGX_ERR_NOT_IMPLEMENTED ); } prolongateAndApplyCorrectionKernel <<< num_blocks, block_size>>>(alpha, (int)this->A->get_num_rows(), x_ptr, e_ptr, aggregates_ptr, this->m_num_aggregates); cudaCheckError(); } // Prolongate the error on DEVICE using block dia-csr format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void Aggregation_AMG_Level<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::prolongateAndApplyCorrection_4x4(Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &ec, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &bf, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &xf, Vector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &rf) { AMGX_CPU_PROFILER("Aggregation_AMG_Level::prolongate_and_apply_correction_4x4 "); if ( this->m_error_scaling >= 2 ) { if ( this->scale_counter > 0 ) { const IndexType *aggregates_ptr = 
this->m_aggregates.raw(); ValueTypeB *x_ptr = xf.raw(); const ValueTypeB *e_ptr = ec.raw(); const int block_size = 64; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int) ((this->A->get_num_rows() - 1) / block_size + 1)); prolongateAndApplyCorrectionBlockDiaCsrKernel <<< num_blocks, block_size>>>(this->scale, (int)this->getA().get_num_rows(), x_ptr, e_ptr, aggregates_ptr, this->m_num_aggregates, this->getA().get_block_dimy()); cudaCheckError(); this->scale_counter--; return; } bool vanek_scaling = this->m_error_scaling > 3; IndexType numRowsCoarse = this->next_d->getA().get_num_rows(); IndexType numRowsFine = this->A->get_num_rows(); IndexType blockdim = this->A->get_block_dimx(); if ( blockdim != this->A->get_block_dimy() ) { FatalError("Unsupported dimension for aggregation amg level", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } VVector ef( rf.size() ); VVector Aef( rf.size() ); ef.set_block_dimy( blockdim ); Aef.set_block_dimy( blockdim ); // prolongate e const int threads_per_block = 256; const int num_block_values = min( AMGX_GRID_MAX_SIZE, (numRowsFine * blockdim - 1) / threads_per_block + 1); const cudaStream_t stream = nullptr; prolongateVector <<< num_block_values, threads_per_block, 0, stream>>>( this->m_aggregates.raw(), ec.raw(), ef.raw(), numRowsFine, numRowsCoarse, blockdim ); ef.dirtybit = 1; cudaStreamSynchronize(stream); cudaCheckError(); int preSmooth; if ( vanek_scaling ) { preSmooth = this->amg->getNumPostsweeps(); } else { preSmooth = this->scaling_smoother_steps; } //smooth error this->smoother->setTolerance( 0.0 ); this->smoother->set_max_iters( preSmooth ); if ( vanek_scaling ) { thrust::fill( Aef.begin(), Aef.end(), types::util<ValueTypeB>::get_zero() ); cudaCheckError(); this->smoother->solve( Aef, ef, false ); //smooth correction with rhs 0 this->smoother->solve( bf, xf, false ); // smooth x with rhs residual //recompute residual int offset, size; this->getA().getOffsetAndSizeForView(OWNED, &offset, &size); axmb( this->getA(), xf, bf, rf, offset, size ); } else { this->smoother->solve( rf, ef, false ); //smooth correction with rhs residual } // multiply for lambda computation multiply(this->getA(), ef, Aef, OWNED); ValueTypeB nominator, denominator; int offset = 0, size = 0; this->A->getOffsetAndSizeForView(OWNED, &offset, &size); if ( this->m_error_scaling == 2 || this->m_error_scaling == 4 ) { // compute lambda=<rf,Aef>/<Aef,Aef> nominator = thrust::inner_product( rf.begin(), rf.end(), Aef.begin(), types::util<ValueTypeB>::get_zero() ); denominator = thrust::inner_product( Aef.begin(), Aef.end(), Aef.begin(), types::util<ValueTypeB>::get_zero() ); cudaCheckError(); } if ( this->m_error_scaling == 3 || this->m_error_scaling == 5) { // compute lambda=<rf,ef>/<ef,Aef> nominator = thrust::inner_product( rf.begin(), rf.begin() + size * blockdim, ef.begin(), types::util<ValueTypeB>::get_zero() ); denominator = thrust::inner_product( ef.begin(), ef.begin() + size * blockdim, Aef.begin(), types::util<ValueTypeB>::get_zero() ); if (!this->A->is_matrix_singleGPU()) { this->A->getManager()->global_reduce_sum(&nominator); this->A->getManager()->global_reduce_sum(&denominator); } cudaCheckError(); } if (types::util<ValueTypeB>::abs(denominator) == 0.0) { nominator = denominator = types::util<ValueTypeB>::get_one(); } // apply correction x <- x + lambda*e const int num_block_fine = min( AMGX_GRID_MAX_SIZE, (numRowsFine * blockdim - 1) / threads_per_block + 1 ); ValueTypeB alpha = nominator / denominator; if ( types::util<ValueTypeB>::abs(alpha) < .3 ) { alpha = (alpha / 
types::util<ValueTypeB>::abs(alpha)) * .3; // it was this before: alpha = .3, which is not 100% equal } if ( types::util<ValueTypeB>::abs(alpha) > 10 ) { alpha = (alpha / types::util<ValueTypeB>::abs(alpha)) * 10.; // it was this before: alpha = 10., which is not 100% equal } applyCorrection <<< num_block_fine, threads_per_block, 0, stream>>>( alpha, ef.raw(), xf.raw(), numRowsFine * blockdim ); cudaCheckError(); this->scale_counter = this->reuse_scale; //reuse this scale scale_counter times this->scale = alpha; return; } ValueTypeB alpha = types::util<ValueTypeB>::get_one(); const int block_size = 64; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (int) ((this->A->get_num_rows() - 1) / block_size + 1)); const IndexType *aggregates_ptr = this->m_aggregates.raw(); ValueTypeB *x_ptr = xf.raw(); const ValueTypeB *e_ptr = ec.raw(); if (this->m_error_scaling == 1) { FatalError("error_scaling=1 is deprecated", AMGX_ERR_NOT_IMPLEMENTED ); } prolongateAndApplyCorrectionBlockDiaCsrKernel <<< num_blocks, block_size>>>(alpha, (int)this->A->get_num_rows(), x_ptr, e_ptr, aggregates_ptr, this->m_num_aggregates, this->A->get_block_dimy()); cudaCheckError(); } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config >::prolongateAndApplyCorrection(VVector &e, VVector &bf, VVector &x, VVector &tmp) { Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); //this is dirty, but error scaling 2 and 3 do not have a specialized version. Instead, the general version sits in the 4x4 function if ( this->m_error_scaling >= 2 ) { prolongateAndApplyCorrection_4x4(e, bf, x, tmp); } else if (this->A->get_block_size() == 1) { prolongateAndApplyCorrection_1x1(e, bf, x, tmp); } else if (this->A->get_block_dimx() == this->A->get_block_dimy() ) { prolongateAndApplyCorrection_4x4(e, bf, x, tmp); } else { FatalError("Unsupported dimension for aggregation amg level", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } x.dirtybit = 1; if (!this->A->is_matrix_singleGPU() && x.delayed_send == 0) { if (x.in_transfer & RECEIVING) { this->A->manager->exchange_halo_wait(x, x.tag); } this->A->manager->exchange_halo_async(x, x.tag); } } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::restrictResidual(VVector &r, VVector &rr) { if (this->A->get_block_size() == 1) { restrictResidual_1x1(r, rr); } else if (this->A->get_block_dimx() == this->A->get_block_dimy() ) { restrictResidual_4x4(r, rr); } else { FatalError("Unsupported dimension for aggregation amg level", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } //TODO: check level transfer between host and device for multiGPU if (!this->A->is_matrix_singleGPU()) { Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); rr.dirtybit = 1; if (!Ac.is_matrix_singleGPU() && !this->isConsolidationLevel() && rr.delayed_send == 0) { Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); //TODO problem in memoryspace transfer is here if (rr.in_transfer & RECEIVING) { Ac.manager->exchange_halo_wait(rr, rr.tag); } Ac.manager->exchange_halo_async(rr, rr.tag); } } } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::computeRestrictionOperator() { if (this->A->get_block_size() == 1) { computeRestrictionOperator_1x1(); } else if (this->A->get_block_dimx() == 4 && this->A->get_block_dimy() == 4) { computeRestrictionOperator_4x4(); } else { this->computeRestrictionOperator_common(); } } template <typename IndexType> __global__ void coarse_to_global(IndexType *aggregates, IndexType *aggregates_global, IndexType *renumbering, IndexType num_elements, 
int64_t offset) { int element = blockIdx.x * blockDim.x + threadIdx.x; while (element < num_elements) { renumbering[aggregates[element]] = aggregates_global[element] + offset; //this won't be a problem, because we are overwriting the same thing element += blockDim.x * gridDim.x; } } template <typename T, typename IndexType> __global__ void export_matrix_elements(IndexType *row_offsets, IndexType *col_indices, T *values, IndexType *maps, IndexType *renumbering, IndexType *new_row_offsets, IndexType *new_col_indices, T *new_values, IndexType bsize, IndexType size) { int idx = blockIdx.x * blockDim.x / 32 + threadIdx.x / 32; int coopIdx = threadIdx.x % 32; while (idx < size) { int row = maps[idx]; INDEX_TYPE src_base = row_offsets[row]; INDEX_TYPE dst_base = new_row_offsets[idx]; for (int m = coopIdx; m < row_offsets[row + 1]*bsize - src_base * bsize; m += 32) { new_values[dst_base * bsize + m] = values[src_base * bsize + m]; } for (int m = coopIdx; m < row_offsets[row + 1] - src_base; m += 32) { new_col_indices[dst_base + m] = renumbering[col_indices[src_base + m]]; } idx += gridDim.x * blockDim.x / 32; } } template <class T> __global__ void export_matrix_diagonal(T *values, INDEX_TYPE bsize, INDEX_TYPE *maps, T *output, INDEX_TYPE size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; while (idx < size) { int row = maps[idx]; INDEX_TYPE src_base = row; INDEX_TYPE dst_base = idx; for (int m = 0; m < bsize; m++) { output[dst_base * bsize + m] = values[src_base * bsize + m]; } idx += gridDim.x * blockDim.x; } } __global__ void remove_boundary(INDEX_TYPE *flags, INDEX_TYPE *maps, INDEX_TYPE size) { int element = blockIdx.x * blockDim.x + threadIdx.x; while (element < size) { flags[maps[element]] = 0; //this won't be a problem, because we are overwriting the same thing element += blockDim.x * gridDim.x; } } __global__ void calc_inverse_renumbering(INDEX_TYPE *renum, INDEX_TYPE *irenum, INDEX_TYPE *renum_gbl, INDEX_TYPE base_index, INDEX_TYPE max_element) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < max_element) { irenum[renum[idx]] = renum_gbl[idx] - base_index; idx += blockDim.x * gridDim.x; } } __global__ void create_halo_mapping(INDEX_TYPE *mapping, INDEX_TYPE *node_list, INDEX_TYPE base_index, INDEX_TYPE map_offset, INDEX_TYPE size) { int row = blockIdx.x * blockDim.x + threadIdx.x; while (row < size) { int idx = node_list[row] - base_index; mapping[idx] = map_offset + row; row += blockDim.x * gridDim.x; } } __global__ void map_col_indices_and_count_rowlen(INDEX_TYPE *row_offsets, INDEX_TYPE *col_indices, INDEX_TYPE *row_length, INDEX_TYPE *renumbering, INDEX_TYPE *mapping, INDEX_TYPE *map_offsets, int64_t *index_ranges, INDEX_TYPE part_id, INDEX_TYPE my_id, INDEX_TYPE base_index, INDEX_TYPE my_range, INDEX_TYPE num_neighbors, INDEX_TYPE num_rows) { extern __shared__ volatile int reduction[]; int row = blockIdx.x * blockDim.x / 4 + threadIdx.x / 4; int coopIdx = threadIdx.x % 4; while (row < num_rows) { int valid = 0; for (int idx = row_offsets[row] + coopIdx; idx < row_offsets[row + 1]; idx += 4) //this may look horrible, but I expect low branch divergence, because col indices in a row usually belong to the same partition (or at most one more) { int colIdx = col_indices[idx]; int part = -2; if (colIdx >= index_ranges[2 * part_id] && colIdx < index_ranges[2 * part_id + 1]) //the col index probably belongs to the partition I am working on { part = part_id; } else if (colIdx >= base_index && colIdx < base_index + my_range) //or points back to the owned partition { part = 
-1; } else //or else it points to a third partition { for (int i = 0; i < num_neighbors; i++) { if (colIdx >= index_ranges[2 * i] && colIdx < index_ranges[2 * i + 1]) { part = i; } } } if (part == -2) { col_indices[idx] = -1; #ifdef DEBUG printf("Column index encountered that does not belong to any of my neighbors!! %d\n", colIdx); #endif } else { if (part == -1) { col_indices[idx] = renumbering[colIdx - base_index]; valid++; } else { int new_col_idx = mapping[map_offsets[part] + colIdx - index_ranges[2 * part]]; if (new_col_idx >= 0) { valid++; col_indices[idx] = new_col_idx; } else { col_indices[idx] = -1; } } } } reduction[threadIdx.x] = valid; for (int s = 2; s > 0; s >>= 1) { if (coopIdx < s) { reduction[threadIdx.x] += reduction[threadIdx.x + s]; } __syncthreads(); } if (coopIdx == 0) { row_length[row] = reduction[threadIdx.x]; } row += gridDim.x * blockDim.x / 4; } } __global__ void map_col_indices(INDEX_TYPE *row_offsets, INDEX_TYPE *col_indices, int64_t *halo_ranges, INDEX_TYPE *halo_renumbering, INDEX_TYPE *halo_rows, INDEX_TYPE *global_renumbering, INDEX_TYPE num_neighbors, INDEX_TYPE num_rows, INDEX_TYPE num_rows_processed) { int row = blockIdx.x * blockDim.x / 4 + threadIdx.x / 4; int coopIdx = threadIdx.x % 4; while (row < num_rows_processed) { for (int idx = row_offsets[row] + coopIdx; idx < row_offsets[row + 1]; idx += 4) { int colIdx = col_indices[idx]; int part = 0; if (colIdx < num_rows) { part = -1; } else { colIdx = global_renumbering[colIdx]; for (int i = 0; i < num_neighbors; i++) { if (colIdx >= halo_ranges[2 * i] && colIdx < halo_ranges[2 * i + 1]) { part = i; break; } } } if (part == -1) { col_indices[idx] = colIdx; } else { col_indices[idx] = halo_renumbering[halo_rows[part] + colIdx - halo_ranges[2 * part]]; } } row += gridDim.x * blockDim.x / 4; } } template <class T> __global__ void reorder_whole_matrix(INDEX_TYPE *old_rows, INDEX_TYPE *old_cols, T *old_vals, INDEX_TYPE *rows, INDEX_TYPE *cols, T *vals, INDEX_TYPE bsize, INDEX_TYPE num_rows) { int row = blockIdx.x * blockDim.x + threadIdx.x; while (row < num_rows) { INDEX_TYPE dst_row = row; INDEX_TYPE src_base = old_rows[row]; INDEX_TYPE dst = rows[dst_row]; for (int i = 0; i < old_rows[row + 1] - src_base; i++) { INDEX_TYPE colIdx = old_cols[src_base + i]; if (colIdx >= 0) { cols[dst] = colIdx; for (int j = 0; j < bsize; j++) { vals[dst * bsize + j] = old_vals[(src_base + i) * bsize + j]; } dst++; } } row += blockDim.x * gridDim.x; } } __global__ void calc_gbl_renumbering(INDEX_TYPE *inv_renum, INDEX_TYPE *gbl_renum, INDEX_TYPE size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { gbl_renum[inv_renum[idx]] = idx; idx += blockDim.x * gridDim.x; } } template <typename ValueType> __global__ void write_diagonals(ValueType *values, INDEX_TYPE *diag, INDEX_TYPE *map, ValueType *output, INDEX_TYPE bsize, INDEX_TYPE size) { int nzPerBlock = blockDim.x / bsize; int row = blockIdx.x * nzPerBlock + threadIdx.x / bsize; int vecIdx = threadIdx.x % bsize; if (threadIdx.x >= (blockDim.x / bsize)*bsize) { return; } while (row < size) { output[row * bsize + vecIdx] = values[diag[map[row]] * bsize + vecIdx]; row += gridDim.x * nzPerBlock; } } template <typename ValueType> __global__ void write_diagonals_back(ValueType *values, INDEX_TYPE *diag, ValueType *source, INDEX_TYPE bsize, INDEX_TYPE size) { int nzPerBlock = blockDim.x / bsize; int row = blockIdx.x * nzPerBlock + threadIdx.x / bsize; int vecIdx = threadIdx.x % bsize; if (threadIdx.x >= (blockDim.x / bsize)*bsize) { return; } while (row < size) 
{ values[diag[row]*bsize + vecIdx] = source[row * bsize + vecIdx]; row += gridDim.x * nzPerBlock; } } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::prepareNextLevelMatrix_full(const Matrix<TConfig> &A, Matrix<TConfig> &Ac) { if (A.is_matrix_singleGPU()) { return; } int num_neighbors = A.manager->neighbors.size(); if (TConfig::memSpace == AMGX_host) { FatalError("Aggregation AMG Not implemented for host", AMGX_ERR_NOT_IMPLEMENTED); } else { int c_size = Ac.get_num_rows(); int f_size = A.get_num_rows(); int diag = Ac.hasProps(DIAG); if (A.manager->B2L_rings[0].size() > 2) { FatalError("Aggregation_AMG_Level prepareNextLevelMatrix not implemented >1 halo rings", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } //get coarse -> fine global renumbering IVector renumbering(c_size); int num_blocks = min(4096, (c_size + 127) / 128); coarse_to_global <<< num_blocks, 128>>>(this->m_aggregates.raw(), this->m_aggregates_fine_idx.raw(), renumbering.raw(), f_size, 0); cudaCheckError(); // // Step 0 - form halo matrices that are exported to neighbors // std::vector<Matrix<TConfig> > halo_rows(num_neighbors); std::vector<DistributedManager<TConfig> > halo_btl(num_neighbors); for (int i = 0; i < num_neighbors; i++ ) { int num_unique = Ac.manager->B2L_rings[i][1]; //prepare export halo matrices halo_btl[i].resize(1, 1); halo_btl[i].set_global_id(Ac.manager->global_id()); halo_btl[i].B2L_maps[0].resize(num_unique); halo_btl[i].B2L_rings[0].resize(2); halo_btl[i].B2L_rings[0][0] = 0; halo_btl[i].B2L_rings[0][1] = num_unique; halo_btl[i].set_index_range(A.manager->index_range()); halo_btl[i].set_base_index(A.manager->base_index()); //global indices of rows of the halo matrix thrust::copy(thrust::make_permutation_iterator( renumbering.begin(), Ac.manager->B2L_maps[i].begin()), thrust::make_permutation_iterator( renumbering.begin(), Ac.manager->B2L_maps[i].begin() + num_unique), halo_btl[i].B2L_maps[0].begin()); cudaCheckError(); halo_rows[i].addProps(CSR); if (diag) { halo_rows[i].addProps(DIAG); } //calculate row length and row_offsets halo_rows[i].row_offsets.resize(num_unique + 1); thrust::transform(thrust::make_permutation_iterator(Ac.row_offsets.begin() + 1, Ac.manager->B2L_maps[i].begin()), thrust::make_permutation_iterator(Ac.row_offsets.begin() + 1, Ac.manager->B2L_maps[i].end()), thrust::make_permutation_iterator(Ac.row_offsets.begin(), Ac.manager->B2L_maps[i].begin()), halo_rows[i].row_offsets.begin(), thrust::minus<IndexType>()); cudaCheckError(); thrust::exclusive_scan(halo_rows[i].row_offsets.begin(), halo_rows[i].row_offsets.end(), halo_rows[i].row_offsets.begin()); cudaCheckError(); //resize halo matrix IndexType num_nz = halo_rows[i].row_offsets[num_unique]; halo_rows[i].resize(num_unique, num_unique, num_nz, Ac.get_block_dimy(), Ac.get_block_dimx(), 1); //copy relevant rows and renumber their column indices num_blocks = min(4096, (num_unique + 127) / 128); export_matrix_elements <<< num_blocks, 128>>>(Ac.row_offsets.raw(), Ac.col_indices.raw(), Ac.values.raw(), Ac.manager->B2L_maps[i].raw(), renumbering.raw(), halo_rows[i].row_offsets.raw(), halo_rows[i].col_indices.raw(), halo_rows[i].values.raw(), A.get_block_size(), num_unique); cudaCheckError(); if (diag) { export_matrix_diagonal <<< num_blocks, 128>>>(Ac.values.raw() + Ac.row_offsets[Ac.get_num_rows()]*Ac.get_block_size(), Ac.get_block_size(), Ac.manager->B2L_maps[i].raw(), halo_rows[i].values.raw() + halo_rows[i].row_offsets[halo_rows[i].get_num_rows()]*Ac.get_block_size(), num_unique); cudaCheckError(); } } 
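/* Editorial recap of Step 0 (sketch, not original code): at this point each halo_rows[i] is a
   small CSR matrix holding copies of the coarse boundary rows listed in Ac.manager->B2L_maps[i],
   with column indices rewritten through `renumbering` to the global ids that identify aggregates
   across partitions, and halo_btl[i].B2L_maps[0] holds the global ids of those rows themselves.
   E.g. if B2L_maps[i] = [2 5], halo_rows[i] has two rows, copies of coarse rows 2 and 5 (plus
   their external diagonal blocks when the DIAG property is set).  These per-neighbor matrices
   and their metadata are what gets shipped out in the exchange below. */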
Ac.manager->getComms()->exchange_matrix_halo(halo_rows, halo_btl, Ac); //--------------------- renumbering/reordering matrix, integrating halo ----------------------------- Ac.set_initialized(0); //number of owned rows c_size = Ac.manager->halo_offsets[0]; f_size = A.manager->halo_offsets[0]; num_blocks = min(4096, (c_size + 511) / 512); int rings = 1; // // Step 1 - calculate inverse renumbering (to global indices - base_index) // Ac.manager->inverse_renumbering.resize(c_size); thrust::transform(renumbering.begin(), renumbering.begin() + c_size, thrust::constant_iterator<IndexType>(A.manager->base_index()), Ac.manager->inverse_renumbering.begin(), thrust::minus<IndexType>()); cudaCheckError(); //big renumbering table for going from global index to owned local index IVector global_to_coarse_local(Ac.manager->index_range()); thrust::fill(global_to_coarse_local.begin(), global_to_coarse_local.begin() + Ac.manager->index_range(), -1); cudaCheckError(); calc_gbl_renumbering <<< num_blocks, 512>>>(Ac.manager->inverse_renumbering.raw(), global_to_coarse_local.raw(), c_size); cudaCheckError(); Ac.manager->set_num_halo_rows(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1] - c_size); cudaCheckError(); // // Step 2 - create big mapping table of all halo indices we received (this may use a little too much memory sum(fine nodes per neighbor) // thrust::host_vector<INDEX_TYPE> neighbor_rows(num_neighbors + 1); int max_num_rows = 0; for (int i = 0; i < num_neighbors; i++) { neighbor_rows[i] = halo_rows[i].manager->index_range(); max_num_rows = max_num_rows > halo_rows[i].get_num_rows() ? max_num_rows : halo_rows[i].get_num_rows(); } thrust::exclusive_scan(neighbor_rows.begin(), neighbor_rows.end(), neighbor_rows.begin()); cudaCheckError(); int total_rows_of_neighbors = neighbor_rows[num_neighbors]; IVector halo_mapping(total_rows_of_neighbors); thrust::fill(halo_mapping.begin(), halo_mapping.end(), -1); cudaCheckError(); for (int ring = 0; ring < rings; ring++) { for (int i = 0; i < num_neighbors; i++) { int size = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring]; int num_blocks = min(4096, (size + 127) / 128); create_halo_mapping <<< num_blocks, 128>>>(halo_mapping.raw() + neighbor_rows[i], halo_btl[i].B2L_maps[0].raw() + halo_btl[i].B2L_rings[0][ring], halo_btl[i].base_index(), Ac.manager->halo_offsets[ring * num_neighbors + i], size); } } cudaCheckError(); // // Step 3 - renumber halo matrices and calculate row length (to eventually append to the big matrix) // INDEX_TYPE owned_nnz = Ac.row_offsets[c_size]; IVector neighbor_rows_d(num_neighbors + 1); thrust::copy(neighbor_rows.begin(), neighbor_rows.end(), neighbor_rows_d.begin()); cudaCheckError(); //map column indices of my own matrix (the ones that point outward) map_col_indices <<< num_blocks, 512>>>(Ac.row_offsets.raw() + Ac.manager->num_interior_nodes(), Ac.col_indices.raw(), Ac.manager->halo_ranges.raw(), halo_mapping.raw(), neighbor_rows_d.raw(), renumbering.raw(), num_neighbors, c_size, c_size - Ac.manager->num_interior_nodes()); cudaCheckError(); IVector temp_row_len(max_num_rows); for (int i = 0; i < num_neighbors; i++) { //map column indices of halo matrices int size = halo_rows[i].get_num_rows(); int num_blocks = min(4096, (size + 127) / 128); map_col_indices_and_count_rowlen <<< num_blocks, 128, 128 * sizeof(INDEX_TYPE)>>>( halo_rows[i].row_offsets.raw(), halo_rows[i].col_indices.raw(), temp_row_len.raw(), global_to_coarse_local.raw(), halo_mapping.raw(), neighbor_rows_d.raw(), 
Ac.manager->halo_ranges.raw(), i, Ac.manager->global_id(), Ac.manager->base_index(), Ac.manager->index_range(), num_neighbors, size); for (int ring = 0; ring < rings; ring++) { thrust::copy(temp_row_len.begin() + halo_btl[i].B2L_rings[0][ring], temp_row_len.begin() + halo_btl[i].B2L_rings[0][ring + 1], Ac.row_offsets.begin() + Ac.manager->halo_offsets[ring * num_neighbors + i]); } } cudaCheckError(); INDEX_TYPE old_nnz = Ac.row_offsets[Ac.row_offsets.size() - 1]; thrust::exclusive_scan(Ac.row_offsets.begin() + c_size, Ac.row_offsets.end(), Ac.row_offsets.begin() + c_size, owned_nnz); cudaCheckError(); // // Step 4 - consolidate column indices and values // int new_nnz = Ac.row_offsets[Ac.row_offsets.size() - 1]; Ac.col_indices.resize(new_nnz); Ac.values.resize((new_nnz + 1 + diag * (Ac.row_offsets.size() - 2)) * A.get_block_size()); if (diag) { MVector diags(c_size * Ac.get_block_size()); thrust::copy(Ac.values.begin() + old_nnz * Ac.get_block_size(), Ac.values.begin() + old_nnz * Ac.get_block_size() + c_size * Ac.get_block_size(), diags.begin()); thrust::copy(diags.begin(), diags.begin() + c_size * Ac.get_block_size(), Ac.values.begin() + Ac.row_offsets[Ac.get_num_rows()]*Ac.get_block_size()); cudaCheckError(); } int cumulative_num_rows = c_size; for (int i = 0; i < num_neighbors; i++) { for (int ring = 0; ring < rings; ring++) { int num_rows = halo_btl[i].B2L_rings[0][ring + 1] - halo_btl[i].B2L_rings[0][ring]; int num_blocks = min(4096, (num_rows + 127) / 128); reorder_whole_matrix <<< num_blocks, 128>>>(halo_rows[i].row_offsets.raw() + halo_btl[i].B2L_rings[0][ring], halo_rows[i].col_indices.raw(), halo_rows[i].values.raw(), Ac.row_offsets.raw() + Ac.manager->halo_offsets[ring * num_neighbors + i], Ac.col_indices.raw(), Ac.values.raw(), Ac.get_block_size(), num_rows); if (diag) { thrust::copy(halo_rows[i].values.begin() + (halo_rows[i].row_offsets[halo_rows[i].get_num_rows()] + halo_btl[i].B2L_rings[0][ring])*Ac.get_block_size(), halo_rows[i].values.begin() + (halo_rows[i].row_offsets[halo_rows[i].get_num_rows()] + halo_btl[i].B2L_rings[0][ring + 1])*Ac.get_block_size(), Ac.values.begin() + (Ac.row_offsets[Ac.get_num_rows()] + cumulative_num_rows)*Ac.get_block_size()); cumulative_num_rows += num_rows; } } } cudaCheckError(); Ac.set_num_cols(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1]); Ac.set_num_rows(Ac.get_num_cols()); Ac.set_num_nz(new_nnz); Ac.delProps(COO); Ac.set_initialized(1); Ac.computeDiagonal(); } } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::prepareNextLevelMatrix_diag(const Matrix<TConfig> &A, Matrix<TConfig> &Ac) { if (A.is_matrix_singleGPU()) { return; } int num_neighbors = A.manager->neighbors.size(); if (TConfig::memSpace == AMGX_host) { FatalError("Aggregation AMG Not implemented for host", AMGX_ERR_NOT_IMPLEMENTED); } else { int c_size = Ac.manager->halo_offsets[0]; int f_size = A.manager->halo_offsets[0]; int diag = Ac.hasProps(DIAG); Ac.manager->inverse_renumbering.resize(c_size); //get coarse -> fine renumbering int num_blocks = min(4096, (c_size + 127) / 128); coarse_to_global <<< num_blocks, 128>>>(this->m_aggregates.raw(), this->m_aggregates_fine_idx.raw(), Ac.manager->inverse_renumbering.raw(), f_size, -1 * A.manager->base_index()); cudaCheckError(); Ac.manager->set_num_halo_rows(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1] - c_size); if (!diag) { Ac.computeDiagonal(); } Ac.set_initialized(1); std::vector<MVector> diagonals(num_neighbors); for (int i = 0; i < num_neighbors; i++) { int size = 
Ac.manager->B2L_rings[i][Ac.manager->B2L_rings.size() - 1]; diagonals[i].resize(Ac.get_block_size()*size); int num_blocks = min(4096, (size + 127) / 128); write_diagonals <<< num_blocks, 128>>>(Ac.values.raw(), Ac.diag.raw(), Ac.manager->B2L_maps[i].raw(), diagonals[i].raw(), Ac.get_block_size(), size); } cudaCheckError(); Ac.manager->getComms()->exchange_vectors(diagonals, Ac, this->tag * 100 + 10 + 2); for (int i = 0; i < num_neighbors; i++) { int size = Ac.manager->halo_offsets[i + 1] - Ac.manager->halo_offsets[i]; if (Ac.hasProps(DIAG)) { thrust::copy(diagonals[i].begin(), diagonals[i].begin() + Ac.get_block_size()*size, Ac.values.begin() + Ac.get_block_size() * (Ac.diagOffset() + Ac.manager->halo_offsets[i])); } else { int num_blocks = min(4096, (size + 127) / 128); write_diagonals_back <<< num_blocks, 128>>>(Ac.values.raw(), Ac.diag.raw() + Ac.manager->halo_offsets[i], diagonals[i].raw(), Ac.get_block_size(), size); } } cudaCheckError(); } } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::prepareNextLevelMatrix_none(const Matrix<TConfig> &A, Matrix<TConfig> &Ac) { if (A.is_matrix_singleGPU()) { return; } int num_neighbors = A.manager->neighbors.size(); if (TConfig::memSpace == AMGX_host) { FatalError("Aggregation AMG Not implemented for host", AMGX_ERR_NOT_IMPLEMENTED); } else { int c_size = Ac.manager->halo_offsets[0]; int f_size = A.manager->halo_offsets[0]; int diag = Ac.hasProps(DIAG); Ac.manager->inverse_renumbering.resize(c_size); //get coarse -> fine renumbering int num_blocks = min(4096, (c_size + 127) / 128); coarse_to_global <<< num_blocks, 128>>>(this->m_aggregates.raw(), this->m_aggregates_fine_idx.raw(), Ac.manager->inverse_renumbering.raw(), f_size, -1 * A.manager->base_index()); cudaCheckError(); Ac.manager->set_num_halo_rows(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1] - c_size); Ac.set_initialized(1); if (!diag) { Ac.computeDiagonal(); } } } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::prepareNextLevelMatrix(const Matrix<TConfig> &A, Matrix<TConfig> &Ac) { if (m_matrix_halo_exchange == 0) { this->prepareNextLevelMatrix_none(A, Ac); } else if (m_matrix_halo_exchange == 1) { this->prepareNextLevelMatrix_diag(A, Ac); } else if (m_matrix_halo_exchange == 2) { this->prepareNextLevelMatrix_full(A, Ac); } else { FatalError("Invalid Aggregation matrix_halo_exchange parameter", AMGX_ERR_NOT_IMPLEMENTED); } } __global__ void set_halo_rowlen(INDEX_TYPE *work, INDEX_TYPE *output, INDEX_TYPE size, INDEX_TYPE diag) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { if (work[idx + 1] - work[idx] > 0) { output[idx] += work[idx + 1] - work[idx] - (1 - diag); } idx += blockDim.x * gridDim.x; } } template <typename T> __global__ void append_halo_nz(INDEX_TYPE *row_offsets, INDEX_TYPE *new_row_offsets, INDEX_TYPE *col_indices, INDEX_TYPE *new_col_indices, T *values, T *new_values, INDEX_TYPE size, INDEX_TYPE diag, INDEX_TYPE halo_offset, INDEX_TYPE block_size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { int add_diag = !diag; if (!diag && new_col_indices[new_row_offsets[idx]] != -1) { add_diag = 0; } //if diag or there is already something in the row, then don't add diagonal nonzero (inside diag) int append_offset = -1; for (int i = new_row_offsets[idx]; i < new_row_offsets[idx + 1]; i++) { if (new_col_indices[i] == -1) {append_offset = i; break;} } for (int i = row_offsets[idx]; i < row_offsets[idx + 1]; i++) { if (diag && i == row_offsets[idx]) //if outside diag and this is
the first nonzero in a non-empty row, overwrite diagonal value { for (int j = 0; j < block_size; j++) { new_values[(new_row_offsets[size] + halo_offset + idx)*block_size + j] = values[(row_offsets[size] + halo_offset + idx) * block_size + j]; } } int col_idx = col_indices[i]; if (append_offset == -1 && (col_idx != halo_offset + idx)) {printf("ERROR: append offset is -1 but row has nonzeros in it old %d to %d new %d to %d\n", row_offsets[idx], row_offsets[idx + 1], new_row_offsets[idx], new_row_offsets[idx + 1]); append_offset = 0;} if (col_idx != halo_offset + idx || add_diag) { new_col_indices[append_offset] = col_idx; for (int j = 0; j < block_size; j++) { new_values[append_offset * block_size + j] = values[i * block_size + j]; } append_offset++; } } idx += blockDim.x * gridDim.x; } } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::createCoarseB2LMaps(std::vector<IVector> &in_coarse_B2L_maps) { Matrix<TConfig> &A = this->getA(); m_num_all_aggregates = m_num_aggregates; int num_neighbors = A.manager->neighbors.size(); IndexType max_b2l = 0; for (int i = 0; i < num_neighbors; i++ ) { max_b2l = max_b2l > A.manager->B2L_rings[i][1] ? max_b2l : A.manager->B2L_rings[i][1]; } IVector B2L_aggregates(max_b2l); IVector indices(max_b2l); for (int i = 0; i < num_neighbors; i++ ) { int size = A.manager->B2L_rings[i][1]; thrust::fill(B2L_aggregates.begin(), B2L_aggregates.begin() + size, 0); thrust::sequence(indices.begin(), indices.begin() + size); //substitute coarse aggregate indices for fine boundary nodes thrust::copy(thrust::make_permutation_iterator(this->m_aggregates.begin(), A.manager->B2L_maps[i].begin()), thrust::make_permutation_iterator(this->m_aggregates.begin(), A.manager->B2L_maps[i].begin() + size), B2L_aggregates.begin()); //find the unique ones thrust::sort_by_key(B2L_aggregates.begin(), B2L_aggregates.begin() + size, indices.begin()); IndexType num_unique = thrust::unique_by_key(B2L_aggregates.begin(), B2L_aggregates.begin() + size, indices.begin()).first - B2L_aggregates.begin(); in_coarse_B2L_maps[i].resize(num_unique); //sort it back so we have the original ordering thrust::sort_by_key(indices.begin(), indices.begin() + num_unique, B2L_aggregates.begin()); thrust::copy(B2L_aggregates.begin(), B2L_aggregates.begin() + num_unique, in_coarse_B2L_maps[i].begin()); } cudaCheckError(); } __global__ void populate_coarse_boundary(INDEX_TYPE *flags, INDEX_TYPE *indices, INDEX_TYPE *maps, INDEX_TYPE *output, INDEX_TYPE size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { output[flags[maps[indices[idx]]]] = maps[indices[idx]]; idx += blockDim.x * gridDim.x; } } __global__ void flag_coarse_boundary(INDEX_TYPE *flags, INDEX_TYPE *indices, INDEX_TYPE *maps, INDEX_TYPE size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { flags[maps[indices[idx]]] = 1; idx += blockDim.x * gridDim.x; } } __global__ void flag_halo_indices(INDEX_TYPE *flags, INDEX_TYPE *indices, INDEX_TYPE offset, INDEX_TYPE size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { flags[indices[idx] - offset] = 1; idx += blockDim.x * gridDim.x; } } __global__ void apply_halo_aggregate_indices(INDEX_TYPE *flags, INDEX_TYPE *indices, INDEX_TYPE *output, INDEX_TYPE offset, INDEX_TYPE aggregates_offset, INDEX_TYPE size) { int idx = blockDim.x * blockIdx.x + threadIdx.x; while (idx < size) { output[idx] = flags[indices[idx] - offset] + aggregates_offset; idx += blockDim.x * gridDim.x; } } // renumbering the aggregates/communicating with
neighbors template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::setNeighborAggregates() { Matrix<TConfig> &A = this->getA(); Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); m_num_all_aggregates = m_num_aggregates; /* WARNING: the matrix reordering always happens inside createRenumbering routine. There are three ways to get to this routine 1. matrix_upload_all -> uploadMatrix -> initializeUploadReorderAll -> reorder_matrix -> createRenumbering 2. read_system_distributed -> renumberMatrixOneRing -> reorder_matrix_owned -> createRenumbering 3. solver_setup -> ... -> AMG_Level::setup -> createCoarseMatrices -> setNeighborAggregates -> createRenumbering If you are reading the renumbering from file you might need to add intercept code in the if statement below, otherwise this routine will exit before calling createRenumbering routine (in case of single or disjoint partitions). */ if (this->getA().is_matrix_singleGPU()) { return; } int num_neighbors = A.manager->neighbors.size(); // // Step 0 - set up coarse matrix metadata // if (Ac.manager == NULL) { Ac.manager = new DistributedManager<T_Config>(); } Ac.manager->resize(A.manager->neighbors.size(), 1); Ac.manager->A = &Ac; int f_size = A.get_num_rows(); Ac.manager->setComms(A.manager->getComms()); Ac.manager->set_global_id(A.manager->global_id()); Ac.manager->neighbors = A.manager->neighbors; Ac.manager->set_base_index(A.manager->base_index()); Ac.manager->halo_ranges = A.manager->halo_ranges; Ac.manager->set_index_range(A.manager->index_range()); //-------------------------------------- Section 1 - renumbering ----------------------------------------------------------- // // Step 1 - calculate coarse level B2L maps - any aggregate that has a fine boundary node becomes a coarse boundary node // m_num_all_aggregates = m_num_aggregates; int vec_size = m_num_aggregates + 1; //A.manager->num_boundary_nodes()+1; IVector B2L_aggregates(vec_size); for (int i = 0; i < A.manager->neighbors.size(); i++) { thrust::fill(B2L_aggregates.begin(), B2L_aggregates.begin() + vec_size, 0); int size = A.manager->B2L_rings[i][1]; int block_size = 128; int grid_size = std::min( 4096, ( size + block_size - 1 ) / block_size); flag_coarse_boundary <<< grid_size, block_size>>>(B2L_aggregates.raw(), A.manager->B2L_maps[i].raw(), this->m_aggregates.raw(), size); thrust::exclusive_scan(B2L_aggregates.begin(), B2L_aggregates.begin() + vec_size, B2L_aggregates.begin()); (Ac.manager->B2L_maps)[i].resize(B2L_aggregates[vec_size - 1]); populate_coarse_boundary <<< grid_size, block_size>>>(B2L_aggregates.raw(), A.manager->B2L_maps[i].raw(), this->m_aggregates.raw(), Ac.manager->B2L_maps[i].raw(), size); } cudaCheckError(); for (int i = 0; i < num_neighbors; i++) { Ac.manager->B2L_rings[i].resize(2); Ac.manager->B2L_rings[i][0] = 0; Ac.manager->B2L_rings[i][1] = Ac.manager->B2L_maps[i].size(); } DistributedArranger<T_Config> *prep = new DistributedArranger<T_Config>; prep->initialize_B2L_maps_offsets(Ac, 1); delete prep; Ac.set_num_rows(m_num_aggregates); IVector renumbering(m_num_aggregates + 1); /* +1 is actually not needed, it will be resized in createRenumbering */ Ac.manager->createRenumbering(renumbering); // // Step 2 - renumber aggregates, so boundary nodes will have higher index than interior ones (based on the renumbering we have been calculating) // /* WARNING: 1.
Thrust scatter and gather routines seem more appropriate here, but they implicitly assume that the input and output have certain size correlation, which is not matched by vectors in our case. The only remaining option is to use make_permutation as is done below. Example of Thrust scatter and gather calls IVector ttt(f_size,-1); thrust::scatter(this->m_aggregates.begin(), this->m_aggregates.begin()+f_size, renumbering.begin(), ttt.begin()); thrust::gather(renumbering.begin(), renumbering.end(), this->m_aggregates.begin(), ttt.begin()); thrust::copy(ttt.begin(), ttt.end(), this->m_aggregates.begin()); 2. The original thrust composite call is illegal because it uses the same array (m_aggregates) for input and output. thrust::copy(thrust::make_permutation_iterator(renumbering.begin(), this->m_aggregates.begin()), thrust::make_permutation_iterator(renumbering.begin(), this->m_aggregates.begin()+f_size), this->m_aggregates.begin()); Although it somehow still works, it is much safer to use explicit temporary storage for the intermediate result. */ /* WARNING: must save unreordered aggregates for later use before reordering them. */ IVector unreordered_aggregates(this->m_aggregates); /* WARNING: change Thrust call to explicitly use temporary storage for the intermediate result. The earlier version is illegal, but somehow still works. */ IVector ttt(f_size, -1); thrust::copy(thrust::make_permutation_iterator(renumbering.begin(), this->m_aggregates.begin()), thrust::make_permutation_iterator(renumbering.begin(), this->m_aggregates.begin() + f_size), ttt.begin()); thrust::copy(ttt.begin(), ttt.end(), this->m_aggregates.begin()); cudaCheckError(); //we don't need renumbering anymore, it will be identity on the coarse level //-------------------------------------- Section 2 - communication ----------------------------------------------------------- // // Step 3 - populate aggregates_fine_idx, which stores for every fine node the original global index of the aggregate (which is lowest global index of nodes aggregated together) // // // These are different when we do /don't do matrix halo exchanges - when we do we need global indices to match nodes, // and in this case Ac after computeA will not have the same ordering of halo nodes as after prepareNextLevel_full. // However when we do not do matrix halo exchange we are only interested in the ordering of halo nodes on the coarse level, // and we can get that by exchanging the (already renumbered) aggregates vector. // if (m_matrix_halo_exchange == 2) { //Find original global indices of nodes that have the minimum id in the aggregates. 
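/* Editorial sketch (not original code): the two Thrust calls below are equivalent to the plain
   serial loop
       for (int i = 0; i < f_size; ++i)
           m_aggregates_fine_idx[i] =
               A.manager->inverse_renumbering[ m_aggregates_fine_idx[i] ] + A.manager->base_index();
   i.e. the representative fine node stored for each row is translated from the local (reordered)
   numbering back to its global fine index, so that all partitions agree on the aggregate id
   after the halo exchange that follows. */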
thrust::copy(thrust::make_permutation_iterator(A.manager->inverse_renumbering.begin(), this->m_aggregates_fine_idx.begin()), thrust::make_permutation_iterator(A.manager->inverse_renumbering.begin(), this->m_aggregates_fine_idx.begin() + f_size), this->m_aggregates_fine_idx.begin()); thrust::transform(this->m_aggregates_fine_idx.begin(), this->m_aggregates_fine_idx.begin() + f_size, thrust::constant_iterator<IndexType>(A.manager->base_index()), this->m_aggregates_fine_idx.begin(), thrust::plus<IndexType>()); //communicate this->m_aggregates_fine_idx.set_block_dimx(1); this->m_aggregates_fine_idx.set_block_dimy(1); m_aggregates_fine_idx.dirtybit = 1; A.manager->exchange_halo(m_aggregates_fine_idx, this->tag * 100 + 1 * 10 + 0); } else { //communicate this->m_aggregates.set_block_dimx(1); this->m_aggregates.set_block_dimy(1); m_aggregates.dirtybit = 1; /* WARNING: you should exchange the unreordered aggregates and append them to your own reordered aggregates, to conform to the assumptions made by the distributed_manager. */ //A.manager->exchange_halo(m_aggregates, this->tag*100+1*10+0); //wrong A.manager->exchange_halo(unreordered_aggregates, this->tag * 100 + 1 * 10 + 0); thrust::copy(unreordered_aggregates.begin() + f_size, unreordered_aggregates.end(), this->m_aggregates.begin() + f_size); } cudaCheckError(); // // Step 4 - consolidate neighbors' aggregates into own list to be able to perform Galerkin product with the n-ring halo // IVector &exchanged_aggregates = m_matrix_halo_exchange == 2 ? this->m_aggregates_fine_idx : this->m_aggregates; int min_index = thrust::reduce(exchanged_aggregates.begin() + A.manager->halo_offsets[0], exchanged_aggregates.begin() + A.manager->halo_offsets[num_neighbors], (int)0xFFFFFFF, thrust::minimum<int>()); int max_index = thrust::reduce(exchanged_aggregates.begin() + A.manager->halo_offsets[0], exchanged_aggregates.begin() + A.manager->halo_offsets[num_neighbors], (int)0, thrust::maximum<int>()); cudaCheckError(); int s_size = max_index - min_index + 2; IVector scratch(s_size); for (int i = 0; i < num_neighbors; i++) { int size = A.manager->halo_offsets[i + 1] - A.manager->halo_offsets[i]; //Could also use local minimums to perform the same operation. The results are the same.
//int min_local = thrust::reduce(exchanged_aggregates.begin()+A.manager->halo_offsets[i], exchanged_aggregates.begin()+A.manager->halo_offsets[i+1], (int)0xFFFFFFF, thrust::minimum<int>()); thrust::fill(scratch.begin(), scratch.begin() + s_size, 0); int block_size = 128; int grid_size = std::min( 4096, ( size + block_size - 1 ) / block_size); flag_halo_indices <<< grid_size, block_size>>>(scratch.raw(), exchanged_aggregates.raw() + A.manager->halo_offsets[i], min_index /*min_local*/, size); thrust::exclusive_scan(scratch.begin(), scratch.begin() + s_size, scratch.begin()); apply_halo_aggregate_indices <<< grid_size, block_size>>>(scratch.raw(), exchanged_aggregates.raw() + A.manager->halo_offsets[i], this->m_aggregates.raw() + A.manager->halo_offsets[i], min_index /*min_local*/, m_num_all_aggregates, size); Ac.manager->halo_offsets[i] = m_num_all_aggregates; m_num_all_aggregates += scratch[s_size - 1]; } cudaCheckError(); Ac.manager->halo_offsets[num_neighbors] = m_num_all_aggregates; } //TODO: The consolidate and unconsolidate parts could be made more efficient by only sending the // nonzero values template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::consolidateVector(VVector &x) { int my_id = this->getA().manager->global_id(); if (this->getA().manager->isRootPartition()) { // Here all partitions being consolidated should have same vector size, see TODO above INDEX_TYPE num_parts = this->getA().manager->getNumPartsToConsolidate(); for (int i = 0; i < num_parts; i++) { int current_part = this->getA().manager->getPartsToConsolidate()[i]; // Vector has been set to correct size if (current_part != my_id) { //printf("Root partition %d receiving %d -> %d and %d -> %d (total %d)\n", this->getA().manager->global_id(), this->getA().manager->getConsolidationArrayOffsets()[i], this->getA().manager->getConsolidationArrayOffsets()[i+1], this->getA().manager->getConsolidationArrayOffsets()[num_parts+i], this->getA().manager->getConsolidationArrayOffsets()[num_parts+i+1], (int)x.size()/x.get_block_size()); this->getA().manager->getComms()->recv_vector(x, current_part, 10000 + current_part, x.get_block_size()*this->getA().manager->getConsolidationArrayOffsets()[i], x.get_block_size() * (this->getA().manager->getConsolidationArrayOffsets()[i + 1] - this->getA().manager->getConsolidationArrayOffsets()[i])); this->getA().manager->getComms()->recv_vector(x, current_part, 20000 + current_part, x.get_block_size()*this->getA().manager->getConsolidationArrayOffsets()[num_parts + i], x.get_block_size() * (this->getA().manager->getConsolidationArrayOffsets()[num_parts + i + 1] - this->getA().manager->getConsolidationArrayOffsets()[num_parts + i])); } } } else { int my_destination_part = this->getA().manager->getMyDestinationPartition(); int i_off, i_size, b_off, b_size; this->getA().manager->getConsolidationOffsets(&i_off, &i_size, &b_off, &b_size); // Here all partitions being consolidated should have same vector size, see TODO above this->getA().manager->getComms()->send_vector_async(x, my_destination_part, 10000 + my_id, i_off * x.get_block_size(), i_size * x.get_block_size()); this->getA().manager->getComms()->send_vector_async(x, my_destination_part, 20000 + my_id, b_off * x.get_block_size(), b_size * x.get_block_size()); } } //TODO: The consolidate and unconsolidate parts could be made more efficient by only sending the // nonzero values template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::unconsolidateVector(VVector &x) { if (this->getA().manager->isRootPartition()) { 
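/* Editorial note (sketch, not original code): this is the inverse of consolidateVector() above.
   The root partition scatters the interior slice (tag 30000 + part) and the boundary slice
   (tag 40000 + part) of x back to every partition it consolidated, using the consolidation
   offsets recorded during setup (all counts are in block-value units), while non-root
   partitions post the matching receives in the else branch below; consolidateVector() gathers
   in the opposite direction with tags 10000/20000. */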
INDEX_TYPE num_parts = this->getA().manager->getNumPartsToConsolidate(); for (int i = 0; i < num_parts; i++) { int current_part = this->getA().manager->getPartsToConsolidate()[i]; // Vector has been set to correct size if (current_part != this->getA().manager->global_id()) { this->getA().manager->getComms()->send_vector_async(x, current_part, 30000 + current_part, x.get_block_size()*this->getA().manager->getConsolidationArrayOffsets()[i], x.get_block_size() * (this->getA().manager->getConsolidationArrayOffsets()[i + 1] - this->getA().manager->getConsolidationArrayOffsets()[i])); this->getA().manager->getComms()->send_vector_async(x, current_part, 40000 + current_part, x.get_block_size()*this->getA().manager->getConsolidationArrayOffsets()[num_parts + i], x.get_block_size() * (this->getA().manager->getConsolidationArrayOffsets()[num_parts + i + 1] - this->getA().manager->getConsolidationArrayOffsets()[num_parts + i])); } } } else { int my_destination_part = this->getA().manager->getMyDestinationPartition(); // Vector x is of unknown size int i_off, i_size, b_off, b_size; this->getA().manager->getConsolidationOffsets(&i_off, &i_size, &b_off, &b_size); this->getA().manager->getComms()->recv_vector(x, my_destination_part, 30000 + this->getA().manager->global_id(), i_off * x.get_block_size(), i_size * x.get_block_size()); this->getA().manager->getComms()->recv_vector(x, my_destination_part, 40000 + this->getA().manager->global_id(), b_off * x.get_block_size(), b_size * x.get_block_size()); } } template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::createCoarseVertices() { profileSubphaseFindAggregates(); //Set the aggregates this->Profile.tic("setAggregates"); this->m_selector->setAggregates(this->getA(), this->m_aggregates, this->m_aggregates_fine_idx, this->m_num_aggregates); this->Profile.toc("setAggregates"); if ( this->m_print_aggregation_info ) { this->m_selector->printAggregationInfo( this->m_aggregates, this->m_aggregates_fine_idx, this->m_num_aggregates ); } this->getA().template setParameter< int > ("aggregates_num", this->m_num_aggregates); // store the number of aggregates } // Creating the next level template <class T_Config> void Aggregation_AMG_Level_Base<T_Config>::createCoarseMatrices() { Matrix<TConfig> &A = this->getA(); Matrix<TConfig> &Ac = this->getNextLevel( MemorySpace( ) )->getA(); profileSubphaseFindAggregates(); int num_parts, num_fine_neighbors, my_id; if (!A.is_matrix_singleGPU()) { num_parts = A.manager->getComms()->get_num_partitions(); num_fine_neighbors = A.manager->neighbors.size(); my_id = A.manager->global_id(); } else { num_parts = 1; num_fine_neighbors = 0; my_id = 0; } if (!A.is_matrix_singleGPU() && this->isConsolidationLevel()) { // ---------------------------------------------------- // Consolidate multiple fine matrices into one coarse matrix // ---------------------------------------------------- // ---------------- // Step 1 // Decide which partitions should be merged together, store in destination_partitions vector // --------------- IVector_h &destination_part = A.manager->getDestinationPartitions(); int my_destination_part = A.manager->getMyDestinationPartition(); if (my_destination_part >= num_parts) { FatalError("During consolidation, sending data to partition that doesn't exist", AMGX_ERR_NOT_IMPLEMENTED); } // Create mapping from coarse partition indices (ranks on the coarse consolidated level) to partition indices on the fine level (ranks on the fine level) IVector_h coarse_part_to_fine_part = destination_part;
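/* Editorial sketch (not original code): the sort / unique / lower_bound sequence below is the
   usual Thrust "compress and index" idiom, in outline:
       thrust::sort(v.begin(), v.end());                          // order the destination ranks
       v.erase(thrust::unique(v.begin(), v.end()), v.end());      // keep one copy of each -> coarse ranks
       thrust::lower_bound(v.begin(), v.end(),
                           keys.begin(), keys.end(), out.begin()); // out[k] = position of keys[k] in v
   where v stands for coarse_part_to_fine_part and keys for destination_part, so every fine rank
   is mapped to the index of the coarse (consolidated) rank it merges into; the EXAMPLE block
   further down walks through the same mapping with concrete numbers. */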
thrust::sort(coarse_part_to_fine_part.begin(), coarse_part_to_fine_part.end()); cudaCheckError(); coarse_part_to_fine_part.erase(thrust::unique(coarse_part_to_fine_part.begin(), coarse_part_to_fine_part.end()), coarse_part_to_fine_part.end()); cudaCheckError(); //Then, the number of coarse partitions is simply the size of this vector int num_coarse_partitions = coarse_part_to_fine_part.size(); // Create mapping from fine partition indices to coarse partition indices, with fine partitions that are merging together having the same coarse indices IVector_h fine_part_to_coarse_part(num_parts); thrust::lower_bound(coarse_part_to_fine_part.begin(), coarse_part_to_fine_part.end(), destination_part.begin(), destination_part.end(), fine_part_to_coarse_part.begin()); cudaCheckError(); // Create mapping from this specific partition's neighbors to consolidated coarse neighbors, but using their fine index (aka. destination partition indices for my neighbors) IVector_h fine_neigh_to_fine_part; A.manager->createNeighToDestPartMap(fine_neigh_to_fine_part, A.manager->neighbors, destination_part, num_fine_neighbors); // Create mapping from consolidated coarse neighbors to fine partition indices (even if the current partition is not going to be a root) IVector_h coarse_neigh_to_fine_part; int num_coarse_neighbors; A.manager->createConsolidatedNeighToPartMap(coarse_neigh_to_fine_part, fine_neigh_to_fine_part, my_destination_part, destination_part, num_coarse_neighbors); // Create mapping from fine neighbors to coarse neighbors, with fine neighbors this partition is merging with labeled with -1 IVector_h fine_neigh_to_coarse_neigh; A.manager->createNeighToConsNeigh(fine_neigh_to_coarse_neigh, coarse_neigh_to_fine_part, fine_neigh_to_fine_part, my_destination_part, num_fine_neighbors); /* EXAMPLE Take the following partition graph (that describes connections between partitions, vertices are the partitions themselves), this is the same graph that is used in the setup example number of partitions num_parts=12 CSR row_offsets [0 4 8 13 21 25 32 36 41 46 50 57 61] CSR col_indices [0 1 3 8 0 1 2 3 1 2 3 4 5 0 1 2 3 4 5 8 10 2 4 5 6 2 3 4 5 6 7 10 4 5 6 7 5 6 7 9 10 0 3 8 10 11 7 9 10 11 3 5 7 8 9 10 11 8 9 10 11] destination_part = [0 0 0 0 4 4 4 4 8 8 8 8] coarse_part_to_fine_part = [0 4 8] num_coarse_partitions = 3 fine_part_to_coarse_part = [0 0 0 0 1 1 1 1 2 2 2 2] original neighbor lists correspond to the rows of the matrix, minus the diagonal elements: (part 0)[1 3 8] (part 3)[0 1 2 4 5 8 10] (part 10)[3 5 7 8 9 11] fine_neigh_to_fine_part (part 0)[0 0 2] (part 3)[0 0 0 0 1 2 2] (part 10)[0 1 1 2 2 2] coarse_neigh_to_fine_part (part 0)[8] (part 3)[4 8] (part 10)[0 4] fine_neigh_to_coarse_neigh (part 0)[-1 -1 0] (part 3)[-1 -1 -1 0 0 1 1] (part 10)[0 1 1 -1 -1 -1] */ // -------------------------- // Step 2 // Create coarse B2L_maps, by mapping fine B2L maps to coarse indices using this->m_aggregates and eliminating duplicates // -------------------------- std::vector<IVector> coarse_B2L_maps(num_fine_neighbors); m_num_all_aggregates = m_num_aggregates; int num_neighbors_temp = A.manager->neighbors.size(); int num_rings = A.manager->B2L_rings[0].size() - 1; if (num_rings != 1) { FatalError("num_rings > 1 not supported in consolidation\n", AMGX_ERR_NOT_IMPLEMENTED); } IndexType max_b2l = 0; for (int i = 0; i < num_neighbors_temp; i++ ) { max_b2l = max_b2l > A.manager->B2L_rings[i][1] ? 
max_b2l : A.manager->B2L_rings[i][1]; } IVector B2L_aggregates(max_b2l); IVector indices(max_b2l); //TODO: use the algorithm from setNeighborAggregates() for (int i = 0; i < num_neighbors_temp; i++ ) { int size = A.manager->B2L_rings[i][1]; thrust::fill(B2L_aggregates.begin(), B2L_aggregates.begin() + size, 0); thrust::sequence(indices.begin(), indices.begin() + size); //substitute coarse aggregate indices for fine boundary nodes thrust::copy(thrust::make_permutation_iterator(this->m_aggregates.begin(), A.manager->B2L_maps[i].begin()), thrust::make_permutation_iterator(this->m_aggregates.begin(), A.manager->B2L_maps[i].begin() + size), B2L_aggregates.begin()); //find the unique ones thrust::sort_by_key(B2L_aggregates.begin(), B2L_aggregates.begin() + size, indices.begin()); IndexType num_unique = thrust::unique_by_key(B2L_aggregates.begin(), B2L_aggregates.begin() + size, indices.begin()).first - B2L_aggregates.begin(); coarse_B2L_maps[i].resize(num_unique); //sort it back so we have the original ordering thrust::sort_by_key(indices.begin(), indices.begin() + num_unique, B2L_aggregates.begin()); thrust::copy(B2L_aggregates.begin(), B2L_aggregates.begin() + num_unique, coarse_B2L_maps[i].begin()); } cudaCheckError(); /* * EXAMPLE say, partition 3 has the following coarse B2L_maps: neighbors [0 1 2 4 5 8 10] B2L_maps[0(=0)] = [6 7 8] B2L_maps[1(=1)] = [8 9 10] B2L_maps[2(=2)] = [10 11 12 13] B2L_maps[3(=4)] = [13 14 15] B2L_maps[4(=5)] = [15 16 17] B2L_maps[5(=8)] = [6 18 19] B2L_maps[6(=10)] = [17 20 19] */ // --------------------------------------------------- // Step 3 // create new B2L maps for each merged destination neighbor and drop B2L maps to neighbors we are merging with // --------------------------------------------------- std::vector<IVector> dest_coarse_B2L_maps; A.manager->consolidateB2Lmaps(dest_coarse_B2L_maps, coarse_B2L_maps, fine_neigh_to_coarse_neigh, num_coarse_neighbors, num_fine_neighbors); /* * EXAMPLE Then, merging the coarse B2L maps on partition 3, we get: coarse_neigh_to_fine_part [4 8] dest_coarse_B2L_maps[0(=4)] = [13 14 15 16 17] dest_coarse_B2L_maps[1(=8)] = [6 17 18 19 20] */ // ----------------------- // Step 4 // Create interior-boundary renumbering of aggregates according to dest_coarse_B2L_maps // ----------------------- // Now renumber the aggregates with all interior aggregates first, boundary aggregates second int num_interior_aggregates; //returned by createAggregatesRenumbering int num_boundary_aggregates; //returned by createAggregatesRenumbering IVector renumbering; //returned by createAggregatesRenumbering // The following call creates the renumbering array and modifies the B2L_maps A.manager->createAggregatesRenumbering(renumbering, dest_coarse_B2L_maps, this->m_num_aggregates, num_coarse_neighbors, num_interior_aggregates, num_boundary_aggregates, num_rings); /* * EXAMPLE Partition 3 will get a renumbering vector of size 21, for the 21 owned aggregates: [0 1 2 3 4 5 17 6 7 8 9 10 11 12 13 14 15 16 18 19 20] num_interior_aggregates = 12 num_boundary_aggregates = 9 */ // ------------------------------------------------- // Step 5 // Determine whether root partition, make list of partitions merged into one // ------------------------------------------------ // Check if I'm root partition and which fine partitions (including myself) are merging into me bool is_root_partition = false; int num_fine_parts_to_consolidate = 0; IVector_h fine_parts_to_consolidate; for (int i = 0; i < num_parts; i++) { if (destination_part[i] == my_id) { is_root_partition = true;
num_fine_parts_to_consolidate++; } } fine_parts_to_consolidate.resize(num_fine_parts_to_consolidate); int count = 0; for (int i = 0; i < num_parts; i++) { if (destination_part[i] == my_id) { fine_parts_to_consolidate[count] = i; count++; } } //save this information as state, as this will also be required during solve for restriction/prolongation A.manager->setIsRootPartition(is_root_partition); A.manager->setNumPartsToConsolidate(num_fine_parts_to_consolidate); A.manager->setPartsToConsolidate(fine_parts_to_consolidate); /* * EXAMPLE isRootPartition is true for partitions 0,4,8 false for others num_fine_parts_to_consolidate = 4 for partitions 0,4,8 fine_parts_to_consolidate (part 0)[0 1 2 3] (part 4)[4 5 6 7] (part 8)[8 9 10 11] */ // ---------------------- // Step 6 // Compute number of interior, boundary and total nodes in the consolidated coarse matrix. Create offsets so that partitions being merged together will have their aggregate indices ordered like this: // [num_interior(fine_parts_to_consolidate[0]] num_interior(fine_parts_to_consolidate[1]] ... num_interior(fine_parts_to_consolidate[num_fine_parts_to_consolidate] // num_boundary(fine_parts_to_consolidate[0]] num_boundary(fine_parts_to_consolidate[1]] ... num_boundary(fine_parts_to_consolidate[num_fine_parts_to_consolidate] ] // ---------------------- // Gather to get number of interior/boundary aggregates of neighbors I will merge with std::vector<IVector_h> vertex_counts; int interior_offset, boundary_offset, total_interior_rows_in_merged, total_boundary_rows_in_merged; int total_rows_in_merged; //Computes these offsets on the root, sends them back A.manager->computeConsolidatedOffsets(my_id, my_destination_part, is_root_partition, num_interior_aggregates, num_boundary_aggregates, vertex_counts, fine_parts_to_consolidate, num_fine_parts_to_consolidate, interior_offset, boundary_offset, total_interior_rows_in_merged, total_boundary_rows_in_merged, total_rows_in_merged, A.manager->getComms()); //Partitions save these offsets, as it will be required during solve restriction/prolongation A.manager->setConsolidationOffsets(interior_offset, num_interior_aggregates, boundary_offset + num_interior_aggregates, num_boundary_aggregates); /* * EXAMPLE For root partition 0, say we have the following interior/boundary counts (note that partition 1 has 0 boundary, as it is only connected to partitions it is merging with) part 0 - interior: 10 boundary 3 part 1 - interior: 18 part 2 - interior: 10 boundary 16 part 3 - interior: 12 boundary 9 interior_offset for partitions 0,1,2,3: 0 10 28 38 (total_interior_rows_in_merged 50) boundary_offset for partitions 0,1,2,3: 0 3 3 19 (total_boundary_rows_in_merged 28) */ // ---------------------- // Step 7 // Each partition renumbers its aggregates and dest_coarse_B2L_maps using offsets computed in Step 6 and permutation in Step 4 // ---------------------- // Kernel to renumber the aggregates int block_size = 128; int grid_size = std::min( 4096, ( A.manager->halo_offsets[0] + block_size - 1 ) / block_size); renumberAggregatesKernel <<< grid_size, block_size >>>(renumbering.raw(), interior_offset, boundary_offset, this->m_aggregates.raw(), A.manager->halo_offsets[0], num_interior_aggregates, renumbering.size()); cudaCheckError(); for (int i = 0; i < num_coarse_neighbors; i++) { thrust::transform(dest_coarse_B2L_maps[i].begin(), dest_coarse_B2L_maps[i].end(), thrust::constant_iterator<IndexType>(boundary_offset), dest_coarse_B2L_maps[i].begin(), thrust::plus<IndexType>()); } cudaCheckError(); /* * EXAMPLE 
Partition 3 had a renumbering vector: [0 1 2 3 4 5 17 6 7 8 9 10 11 12 13 14 15 16 18 19 20] which is now adjusted to account for the consolidated coarse matrices' indices: [38 39 40 41 42 43 74 44 45 46 47 48 49 69 70 71 72 73 75 76 77] And the dest_coarse_B2L_maps, which looked like: dest_coarse_B2L_maps[0(=4)] = [13 14 15 16 17] dest_coarse_B2L_maps[1(=8)] = [6 17 18 19 20] is now: dest_coarse_B2L_maps[0(=4)] = [69 70 71 72 73] dest_coarse_B2L_maps[1(=8)] = [74 73 75 76 77] */ // ------------------------------------------------- // Step 8 // Send dest_coarse_B2L_maps to root partitions // ------------------------------------------------ // Each fine partition sends to its root the number of coarse neighbors it has, their ids, and the number of boundary nodes for each coarse neighbor IVector_h num_bdy_per_coarse_neigh(num_coarse_neighbors); for (int i = 0; i < num_coarse_neighbors; i++) { num_bdy_per_coarse_neigh[i] = dest_coarse_B2L_maps[i].size(); } IVector_h consolidated_coarse_neigh_to_fine_part; //consolidated list of coarse neighbors for the root partition, using fine partition indices int num_consolidated_neighbors = 0; std::vector<IVector> consolidated_B2L_maps; //concatenates dest_coarse_B2L_maps received from partitions that are merging into the same root and pointing to the same destination coarse neighbor A.manager->consolidateB2LmapsOnRoot(num_consolidated_neighbors, consolidated_B2L_maps, consolidated_coarse_neigh_to_fine_part, dest_coarse_B2L_maps, coarse_neigh_to_fine_part, num_bdy_per_coarse_neigh, fine_parts_to_consolidate, num_fine_parts_to_consolidate, my_id, my_destination_part, is_root_partition, num_coarse_neighbors, A.manager->getComms()); // // Step 9 - figuring out halo aggregate IDs // //Now we need to update halo aggregate IDs - this is just a halo exchange on this->m_aggregates between partitions //that are being merged together, but we need to send other halos to the root to come up with the halo renumbering //TODO: separate transactions, send "real halo" to the root nodes (coarse neighbors) immediately //Step 9.1: takes care of synchronizing the aggregate IDs between partitions we are merging together and got consistent halo aggregate IDs for neighbor we are not merging with (which are going to be sent to the root in 9.2) A.manager->exchange_halo(this->m_aggregates, 6666); /* * EXAMPLE 2 This example is independent from the previous ones. Say partition 0 and 1 are merging (into 0) partition 0 is neighbors with 1,2,3 and partition 1 is neighbors with 0,3,4 Partitions 3 and 4 are merging (into partition 3) and partition 2 is not merging with anyone. This example details the renumbering of halo indices on partition 0 and partition 1. After the exchange halo, we have: this->m_aggregates on partition 0: [(fine interior nodes) (fine boundary nodes) (fine halo from part 1) (fine halo from part 2) (fine halo from part 3)] [(fine interior nodes) (fine boundary nodes) (13 13 15) (12 15 17) (14 16 18)] aggregates on partition 1: [(fine interior nodes) (fine boundary nodes) (fine halo from part 0) (fine halo from part 3) (fine halo from part 4)] [(fine interior nodes) (fine boundary nodes) (14 16 17) (18 19 19) (15 15 17)] indices in (fine halo from part 0) and (fine halo from part 1) actually contain interior aggregate indices (if they are not connected to partitions 2,3 or 4), because the boundary is disappearing there. Indices in halo regions contain remote-local indices. 
This example is used throughout consolidateAndRenumberHalos */ //Step 9.2 - 9.5 IVector_h halo_offsets(num_consolidated_neighbors + 1, 0); A.manager->consolidateAndRenumberHalos(this->m_aggregates, A.manager->halo_offsets, halo_offsets, A.manager->neighbors, num_fine_neighbors, consolidated_coarse_neigh_to_fine_part, num_consolidated_neighbors, destination_part, my_destination_part, is_root_partition, fine_parts_to_consolidate, num_fine_parts_to_consolidate, num_parts, my_id, total_rows_in_merged, this->m_num_all_aggregates, A.manager->getComms()); if (is_root_partition) { for (int i = 0; i < consolidated_B2L_maps.size(); i++) { thrust::sort(consolidated_B2L_maps[i].begin(), consolidated_B2L_maps[i].end()); } cudaCheckError(); } // Step 10 do the Galerkin product // ViewType oldView = this->getA().currentView(); this->getA().setView(ALL); // If we reuse the level we keep the previous restriction operator this->Profile.tic("computeR"); computeRestrictionOperator(); this->Profile.toc("computeR"); profileSubphaseComputeCoarseA(); this->Profile.tic("computeA"); Ac.copyAuxData(&A); if (Ac.manager == NULL) { Ac.manager = new DistributedManager<TConfig>(); } this->m_coarseAGenerator->computeAOperator(A, Ac, this->m_aggregates, this->m_R_row_offsets, this->m_R_column_indices, this->m_num_all_aggregates); Ac.setColsReorderedByColor(false); ViewType oldViewC = Ac.currentView(); Ac.setView(FULL); this->Profile.toc("computeA"); // // Step 11, send matrices to root, consolidate // if (!is_root_partition) { A.manager->getComms()->send_vector_async(Ac.row_offsets, my_destination_part, 1111); A.manager->getComms()->send_vector_async(Ac.col_indices, my_destination_part, 1112); A.manager->getComms()->send_vector_async(Ac.values, my_destination_part, 1113); } else { int total_num_rows = this->m_num_all_aggregates; IVector new_row_offsets(total_num_rows + 1, 0); //if diags are inside then we won't be counting those twice when computing halo row length if (!Ac.hasProps(DIAG)) { thrust::fill(new_row_offsets.begin() + halo_offsets[0], new_row_offsets.begin() + halo_offsets[num_consolidated_neighbors], 1); cudaCheckError(); } std::vector<IVector> recv_row_offsets(num_fine_parts_to_consolidate); std::vector<VecInt_t> num_nz(num_fine_parts_to_consolidate); IVector *work_row_offsets; std::vector<VecInt_t> index_offset_array(2 * num_fine_parts_to_consolidate + 1); int interior_offset = 0; int boundary_offset = 0; for (int i = 0; i < num_fine_parts_to_consolidate; i++) { boundary_offset += vertex_counts[i][0]; } int max_num_nz = 0; for (int i = 0; i < num_fine_parts_to_consolidate; i++) { int current_part = fine_parts_to_consolidate[i]; //receive row offsets if (current_part != my_id) { recv_row_offsets[i].resize(total_num_rows + 1); A.manager->getComms()->recv_vector(recv_row_offsets[i], current_part, 1111); work_row_offsets = &(recv_row_offsets[i]); num_nz[i] = (*work_row_offsets)[work_row_offsets->size() - 1]; max_num_nz = max_num_nz > num_nz[i] ? 
max_num_nz : num_nz[i]; } else { work_row_offsets = &(Ac.row_offsets); num_nz[i] = Ac.get_num_nz(); } //Get interior row length thrust::transform(work_row_offsets->begin() + interior_offset + 1, work_row_offsets->begin() + interior_offset + vertex_counts[i][0] + 1, work_row_offsets->begin() + interior_offset, new_row_offsets.begin() + interior_offset, thrust::minus<IndexType>()); cudaCheckError(); //Get boundary row length thrust::transform(work_row_offsets->begin() + boundary_offset + 1, work_row_offsets->begin() + boundary_offset + vertex_counts[i][1] + 1, work_row_offsets->begin() + boundary_offset, new_row_offsets.begin() + boundary_offset, thrust::minus<IndexType>()); cudaCheckError(); //Increment halo row length by one for every nonzero that is an edge from the halo into this partition int size = halo_offsets[num_consolidated_neighbors] - halo_offsets[0]; const int block_size = 128; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / block_size + 1); set_halo_rowlen <<< num_blocks, block_size>>>(work_row_offsets->raw() + halo_offsets[0], new_row_offsets.raw() + halo_offsets[0], size, Ac.hasProps(DIAG)); cudaCheckError(); index_offset_array[i] = interior_offset; index_offset_array[num_fine_parts_to_consolidate + i] = boundary_offset; interior_offset += vertex_counts[i][0]; boundary_offset += vertex_counts[i][1]; index_offset_array[i + 1] = interior_offset; index_offset_array[num_fine_parts_to_consolidate + i + 1] = boundary_offset; } A.manager->setConsolidationArrayOffsets(index_offset_array); //Exclusive scan row length array to get row offsets thrust::exclusive_scan(new_row_offsets.begin(), new_row_offsets.end(), new_row_offsets.begin()); cudaCheckError(); //Prepare to receive column indices and values int num_nz_consolidated = new_row_offsets[new_row_offsets.size() - 1]; IVector recv_col_indices(max_num_nz); IVector new_col_indices(num_nz_consolidated); MVector recv_values((max_num_nz + 1 + Ac.hasProps(DIAG) * (halo_offsets[num_consolidated_neighbors] - 1))*Ac.get_block_size()); MVector new_values((num_nz_consolidated + 1 + Ac.hasProps(DIAG) * (halo_offsets[num_consolidated_neighbors] - 1))*Ac.get_block_size()); thrust::fill(new_col_indices.begin() + new_row_offsets[halo_offsets[0]], new_col_indices.end(), -1); //Set all the halo col indices to -1 if (!Ac.hasProps(DIAG)) { thrust::fill(new_values.begin() + num_nz_consolidated * Ac.get_block_size(), new_values.end(), types::util<ValueTypeA>::get_zero()); } cudaCheckError(); IVector *work_col_indices; MVector *work_values; interior_offset = 0; boundary_offset = 0; for (int i = 0; i < num_fine_parts_to_consolidate; i++) { int current_part = fine_parts_to_consolidate[i]; boundary_offset += vertex_counts[i][0]; } for (int i = 0; i < num_fine_parts_to_consolidate; i++) { int current_part = fine_parts_to_consolidate[i]; if (current_part != my_id) { A.manager->getComms()->recv_vector(recv_col_indices, current_part, 1112, 0, num_nz[i]); A.manager->getComms()->recv_vector(recv_values, current_part, 1113, 0, (num_nz[i] + 1 + Ac.hasProps(DIAG) * (halo_offsets[num_consolidated_neighbors] - 1))*Ac.get_block_size()); work_col_indices = &(recv_col_indices); work_row_offsets = &(recv_row_offsets[i]); work_values = &(recv_values); } else { work_row_offsets = &(Ac.row_offsets); work_col_indices = &(Ac.col_indices); work_values = &(Ac.values); } //Put interior rows in place thrust::copy(work_col_indices->begin() + (*work_row_offsets)[interior_offset], work_col_indices->begin() + (*work_row_offsets)[interior_offset + vertex_counts[i][0]], 
new_col_indices.begin() + new_row_offsets[interior_offset]); cudaCheckError(); thrust::copy(work_values->begin() + (*work_row_offsets)[interior_offset]*Ac.get_block_size(), work_values->begin() + ((*work_row_offsets)[interior_offset + vertex_counts[i][0]])*Ac.get_block_size(), new_values.begin() + new_row_offsets[interior_offset]*Ac.get_block_size()); cudaCheckError(); //Put boundary rows in place thrust::copy(work_col_indices->begin() + (*work_row_offsets)[boundary_offset], work_col_indices->begin() + (*work_row_offsets)[boundary_offset + vertex_counts[i][1]], new_col_indices.begin() + new_row_offsets[boundary_offset]); cudaCheckError(); thrust::copy(work_values->begin() + (*work_row_offsets)[boundary_offset]*Ac.get_block_size(), work_values->begin() + ((*work_row_offsets)[boundary_offset + vertex_counts[i][1]])*Ac.get_block_size(), new_values.begin() + new_row_offsets[boundary_offset]*Ac.get_block_size()); cudaCheckError(); //Process halo rows (merge) int size = halo_offsets[num_consolidated_neighbors] - halo_offsets[0]; const int block_size = 128; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (size - 1) / block_size + 1); //TODO: vectorise this kernel, will be inefficient for larger block sizes append_halo_nz <<< num_blocks, block_size>>>(work_row_offsets->raw() + halo_offsets[0], new_row_offsets.raw() + halo_offsets[0], work_col_indices->raw(), new_col_indices.raw(), work_values->raw(), new_values.raw(), size, Ac.hasProps(DIAG), halo_offsets[0], Ac.get_block_size()); cudaCheckError(); // Diagonals if (Ac.hasProps(DIAG)) { // Diagonal corresponding to interior rows thrust::copy(work_values->begin() + (num_nz[i] + interior_offset)*Ac.get_block_size(), work_values->begin() + (num_nz[i] + interior_offset + vertex_counts[i][0])*Ac.get_block_size(), new_values.begin() + (new_row_offsets[halo_offsets[halo_offsets.size() - 1]] + interior_offset)*Ac.get_block_size()); // Diagonal corresponding to boundary rows thrust::copy(work_values->begin() + (num_nz[i] + boundary_offset)*Ac.get_block_size(), work_values->begin() + (num_nz[i] + boundary_offset + vertex_counts[i][1])*Ac.get_block_size(), new_values.begin() + (new_row_offsets[halo_offsets[halo_offsets.size() - 1]] + boundary_offset)*Ac.get_block_size()); cudaCheckError(); } interior_offset += vertex_counts[i][0]; boundary_offset += vertex_counts[i][1]; } Ac.set_initialized(0); Ac.row_offsets = new_row_offsets; Ac.col_indices = new_col_indices; Ac.values = new_values; } // Create a new distributed communicator for coarse levels that only contains active partitions Ac.manager->setComms(A.manager->getComms()->Clone()); Ac.manager->getComms()->createSubComm(coarse_part_to_fine_part, is_root_partition); // // Step 12 - finalizing, bookkeping // if (is_root_partition) { int my_consolidated_id = fine_part_to_coarse_part[my_id]; for (int i = 0; i < num_consolidated_neighbors; i++) { consolidated_coarse_neigh_to_fine_part[i] = fine_part_to_coarse_part[consolidated_coarse_neigh_to_fine_part[i]]; } Ac.manager->initializeAfterConsolidation( my_consolidated_id, Ac, consolidated_coarse_neigh_to_fine_part, total_interior_rows_in_merged, total_boundary_rows_in_merged, this->m_num_all_aggregates, halo_offsets, consolidated_B2L_maps, 1, true); Ac.manager->B2L_rings.resize(num_consolidated_neighbors + 1); for (int i = 0; i < num_consolidated_neighbors; i++) { Ac.manager->B2L_rings[i].resize(2); Ac.manager->B2L_rings[i][0] = 0; Ac.manager->B2L_rings[i][1] = consolidated_B2L_maps[i].size(); } Ac.manager->set_initialized(Ac.row_offsets); 
Ac.manager->getComms()->set_neighbors(num_consolidated_neighbors); int new_nnz = Ac.row_offsets[Ac.row_offsets.size() - 1]; Ac.set_num_nz(new_nnz); Ac.set_num_cols(Ac.manager->halo_offsets[Ac.manager->halo_offsets.size() - 1]); Ac.set_num_rows(Ac.get_num_cols()); if (A.hasProps(DIAG)) { Ac.addProps(DIAG); } Ac.computeDiagonal(); Ac.set_initialized(1); } else { Ac.set_initialized(0); // set size of Ac to be zero Ac.resize(0, 0, 0, 1); Ac.set_initialized(1); } this->getA().setView(oldView); Ac.setView(OWNED); } else { /* WARNING: do not recompute prolongation (P) and restriction (R) when you are reusing the level structure (structure_reuse_levels > 0). Notice that in aggregation path, prolongation P is implicit, and is used through the aggregates array. */ if (this->isReuseLevel() == false) { this->setNeighborAggregates(); } this->getA().setView(ALL); // Compute restriction operator // TODO: computing the restriction operator could be merged with the selector to save some work // If we reuse the level we keep the previous restriction operator if (this->isReuseLevel() == false) { profileSubphaseComputeRestriction(); this->Profile.tic("computeR"); computeRestrictionOperator(); this->Profile.toc("computeR"); } profileSubphaseComputeCoarseA(); this->Profile.tic("computeA"); Ac.set_initialized(0); Ac.copyAuxData(&A); this->m_coarseAGenerator->computeAOperator(A, Ac, this->m_aggregates, this->m_R_row_offsets, this->m_R_column_indices, this->m_num_all_aggregates); Ac.setColsReorderedByColor(false); Ac.setView(FULL); this->Profile.toc("computeA"); this->prepareNextLevelMatrix(A, Ac); A.setView(OWNED); Ac.setView(OWNED); } this->m_next_level_size = this->m_num_all_aggregates * Ac.get_block_dimy(); if (this->m_print_aggregation_info) { MatrixAnalysis<TConfig> ana(&Ac); ana.aggregatesQuality2(this->m_aggregates, this->m_num_aggregates, A); } } // ------------------------------------------------------------- // Explicit instantiations // ------------------------------------------------------------- #define AMGX_CASE_LINE(CASE) template class Aggregation_AMG_Level<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } }
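// --------------------------------------------------------------------------
// Editor's sketch (not part of the AMGX build, never called): the Step 1
// fine-to-coarse partition mapping above is just sort + unique over
// destination_part followed by a vectorized lower_bound. The host-side Thrust
// snippet below uses the worked example's destination_part
// [0 0 0 0 4 4 4 4 8 8 8 8] and reproduces coarse_part_to_fine_part = [0 4 8]
// and fine_part_to_coarse_part = [0 0 0 0 1 1 1 1 2 2 2 2].
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/binary_search.h>
static void partition_mapping_sketch()
{
    const int dp[] = {0, 0, 0, 0, 4, 4, 4, 4, 8, 8, 8, 8};
    thrust::host_vector<int> destination_part(dp, dp + 12);
    // Sorted, de-duplicated destination ids -> the coarse partitions [0 4 8]
    thrust::host_vector<int> coarse_part_to_fine_part = destination_part;
    thrust::sort(coarse_part_to_fine_part.begin(), coarse_part_to_fine_part.end());
    coarse_part_to_fine_part.erase(
        thrust::unique(coarse_part_to_fine_part.begin(), coarse_part_to_fine_part.end()),
        coarse_part_to_fine_part.end());
    // Position of each fine partition's destination in that list
    // -> [0 0 0 0 1 1 1 1 2 2 2 2]
    thrust::host_vector<int> fine_part_to_coarse_part(destination_part.size());
    thrust::lower_bound(coarse_part_to_fine_part.begin(), coarse_part_to_fine_part.end(),
                        destination_part.begin(), destination_part.end(),
                        fine_part_to_coarse_part.begin());
}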
efb687c038ef8f3bfafa7bd0158d71ed4094edc2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_utils.h" #include "linalg.h" #include <stdio.h> #include "rocblas.h" #ifdef CUDA extern hipblasHandle_t cublas_handle; #endif __global__ void dgemm_naive(const double *A, const double *B, double *C, const int M, const int N, const int K) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (row < M && col < N) { double sum = 0.; for (int k = 0; k < K; k++) sum += A[row * K + k] * B[k * N + col]; C[row * N + col] = sum; } } __global__ void dgemm_ta_naive(const double *A, const double *B, double *C, const int M, const int N, const int K) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (row < M && col < N) { double sum = 0; for (int k = 0; k < K; k++) sum += A[k * M + row] * B[k * N + col]; C[row * N + col] = sum; } } __global__ void dgemm_tb_naive(const double *A, const double *B, const double *C, double *D, const int M, const int N, const int K) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (row < M && col < N) { double sum = 0; for (int k = 0; k < K; k++) sum += A[row * K + k] * B[col * K + k]; D[row * N + col] = sum + C[row * N + col]; } } // Optimized matrix multiply kernels using shared memory. // Computes C = A*B, where A is a M by K matrix, B is a K by N matrix, C is a M by N matrix. __global__ void dgemm_optimized(const double *A, const double *B, double *C, const int M, const int N, const int K) { double CValue = 0; extern __shared__ double shared[]; int tx = threadIdx.x; int ty = threadIdx.y; int dim = blockDim.x; int Row = blockIdx.y * dim + ty; int Col = blockIdx.x * dim + tx; double* As = (double*)shared; double* Bs = (double*)&shared[dim*dim]; int ARows = M; int ACols = K; int BRows = K; int BCols = N; int CRows = M; int CCols = N; for (int k = 0; k < (dim + ACols -1)/dim; k++) { if (k*dim + tx < ACols && Row < ARows) As[ty * dim + tx] = A[Row * ACols + k * dim+ tx]; else As[ty * dim + tx] = 0.0; if (k * dim + ty < BRows && Col < BCols) Bs[ty * dim + tx] = B[(k * dim * BCols) + (ty * BCols) + Col]; else Bs[ty * dim + tx] = 0.0; __syncthreads(); for (int n = 0; n < dim; n++) { CValue += As[ty * dim + n] * Bs[n * dim + tx]; } __syncthreads(); } if (Row < CRows && Col < CCols) { C[Row*CCols + Col] = CValue; } } // Computes C = A'*B, where A is a K by M matrix, B is a K by N matrix, C is a M by N matrix. 
__global__ void dgemm_ta_optimized(const double *A, const double *B, double *C, const int M, const int N, const int K) { double CValue = 0; int tx = threadIdx.x; int ty = threadIdx.y; int dim = blockDim.x; int Row = blockIdx.y * dim + ty; int Col = blockIdx.x * dim + tx; extern __shared__ double shared[]; double* As = (double*)shared; double* Bs = (double*)&shared[dim*dim]; int ARows = M; int ACols = K; int BRows = K; int BCols = N; int CRows = M; int CCols = N; for (int k = 0; k < (dim + ACols -1)/dim; k++) { if (k * dim + tx < ACols && Row < ARows) As[ty * dim + tx] = A[(k * dim + tx)*ARows + Row]; else As[ty * dim + tx] = 0.0; if (k * dim + ty < BRows && Col < BCols) Bs[ty * dim + tx] = B[(k * dim + ty)*BCols + Col]; else Bs[ty * dim + tx] = 0.0; __syncthreads(); for (int n = 0; n < dim; n++) CValue += As[ty * dim + n] * Bs[n * dim + tx]; __syncthreads(); } if (Row < CRows && Col < CCols) { C[Row*CCols + Col] = CValue; } } __global__ void dgemm_tb_optimized(const double *A, const double *B, const double *C, double *D, const size_t M, const size_t K, const size_t N) { double DValue = 0; int tx = threadIdx.x; int ty = threadIdx.y; int dim = blockDim.x; int Row = blockIdx.y * dim + ty; int Col = blockIdx.x * dim + tx; extern __shared__ double shared[]; double* As = (double*)shared; double* Bs = (double*)&shared[dim*dim]; int ARows = M; int ACols = K; int BRows = K; int BCols = N; int CRows = M; int CCols = N; for (int k = 0; k < (dim + ACols -1)/dim; k++) { if (k*dim + tx < ACols && Row < ARows) As[ty * dim + tx] = A[Row*ACols + k*dim + tx]; else As[ty * dim + tx] = 0.0; if (k*dim + ty < BRows && Col < BCols) Bs[ty * dim + tx] = B[k*dim + Col*BRows + ty]; else Bs[ty * dim + tx] = 0.0; __syncthreads(); for (int n = 0; n < dim; n++) DValue += As[ty * dim + n] * Bs[n * dim + tx]; __syncthreads(); } if (Row < CRows && Col < CCols) { D[Row*CCols + Col] = DValue + C[Row*CCols + Col]; } } // Computes C = A*B, where A is a M by K matrix, B is a K by N matrix, C is a M by N matrix. // Matrices are stored in row-major order. void dgemm_gpu(const double *A, const double *B, double *C, const int M, const int N, const int K) { #ifndef CUBLAS int BLOCK_SIZE = 16; #if defined(_GPU_GEMM_NAIVE) dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (M + BLOCK_SIZE - 1) / BLOCK_SIZE); hipLaunchKernelGGL(( dgemm_naive), dim3(grid), dim3(block), 0, 0, A, B, C, M, N, K); checkCudaErrors(hipPeekAtLastError()); checkCudaErrors(hipDeviceSynchronize()); #elif defined(_GPU_GEMM_OPT) int TILE_DIM = 16; dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid((N + dimBlock.x - 1)/dimBlock.x, (M + dimBlock.y - 1)/dimBlock.y); size_t shmem_size = 2 * TILE_DIM * TILE_DIM * sizeof(double); hipLaunchKernelGGL(( dgemm_optimized), dim3(dimGrid), dim3(dimBlock), shmem_size, 0, A, B, C, M, N, K); checkCudaErrors(hipPeekAtLastError()); checkCudaErrors(hipDeviceSynchronize()); #endif #else // Matrices are stored in row-major order, but cuBLAS assumes column-major // order. We want to compute: // A * B = (A^T)^T * (B^T)^T = A'^T * B'^T = (B' * A')^T const double alpha(1.0); const double beta(0.0); hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, M, K, &alpha, B, N, A, K, &beta, C, N); #endif } // Computes C = A'*B, where A is a K by M matrix, B is a K by N matrix, C is a M by N matrix. // Matrices are stored in row-major order. 
void dgemm_ta_gpu(const double *A, const double *B, double *C, const int M, const int N, const int K) { #ifndef CUBLAS int BLOCK_SIZE = 16; #if defined(_GPU_GEMM_NAIVE) dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (M + BLOCK_SIZE - 1) / BLOCK_SIZE); hipLaunchKernelGGL(( dgemm_ta_naive), dim3(grid), dim3(block), 0, 0, A, B, C, M, N, K); checkCudaErrors(hipPeekAtLastError()); checkCudaErrors(hipDeviceSynchronize()); #elif defined(_GPU_GEMM_OPT) int TILE_DIM = 16; dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid((N + dimBlock.x - 1)/dimBlock.x, (M + dimBlock.y - 1)/dimBlock.y); size_t shmem_size = 2 * TILE_DIM * TILE_DIM * sizeof(double); hipLaunchKernelGGL(( dgemm_ta_optimized), dim3(dimGrid), dim3(dimBlock), shmem_size, 0, A, B, C, M, N, K); checkCudaErrors(hipPeekAtLastError()); checkCudaErrors(hipDeviceSynchronize()); #endif #else // Matrices are stored in row-major order, but cuBLAS assumes column-major // order. We want to compute: // A^T * B = A^T * (B^T)^T = A' * B'^T = (B'*A'^T)^T // M is KxM, B is KxN, C is MxN. /* * FILLME: Use hipblasDgemm() */ const double alpha(1.0); const double beta(0.0); hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_T, N, M, K, &alpha, B, N, A, M, &beta, C, N); #endif } // Computes D = A*B'+C, where A is a M by K matrix, B is a N by K matrix, C and D are M by N matrices. // Matrices are stored in row-major order. void dgemm_tb_gpu(const double *A, const double *B, const double *C, double *D, const int M, const int N, const int K) { #ifndef CUBLAS int BLOCK_SIZE = 16; #if defined(_GPU_GEMM_NAIVE) dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (M + BLOCK_SIZE - 1) / BLOCK_SIZE); hipLaunchKernelGGL(( dgemm_tb_naive), dim3(grid), dim3(block), 0, 0, A, B, C, D, M, N, K); checkCudaErrors(hipPeekAtLastError()); checkCudaErrors(hipDeviceSynchronize()); #elif defined(_GPU_GEMM_OPT) int TILE_DIM = 16; dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid((N + dimBlock.x - 1)/dimBlock.x, (M + dimBlock.y - 1)/dimBlock.y); size_t shmem_size = 2 * TILE_DIM * TILE_DIM * sizeof(double); hipLaunchKernelGGL(( dgemm_tb_optimized), dim3(dimGrid), dim3(dimBlock), shmem_size, 0, A, B, C, D, M, K, N); checkCudaErrors(hipPeekAtLastError()); checkCudaErrors(hipDeviceSynchronize()); #endif #else // D = A * B^T // Matrices are stored in row-major order, but cuBLAS assumes column-major // order. We want to compute: // C = A * B^T = (A^T)^T * B^T = A'^T * B' = (B'^T * A')^T // A is MxK, B is NxK, C is MxN, D is MxN const double alpha(1.0); const double beta(0.0); // D = A * B' hipblasDgemm(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, N, M, K, &alpha, B, K, A, K, &beta, D, N); // D = C + D hipblasDgeam(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, M, &alpha, D, N, &alpha, C, N, D, N); #endif }
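// --------------------------------------------------------------------------
// Editor's usage sketch (never called): how dgemm_gpu() above might be driven
// from host code. Sizes and data are made up; the routine expects row-major
// A (MxK) and B (KxN) on the device and writes row-major C (MxN). If built
// with CUBLAS, it additionally assumes cublas_handle was created elsewhere.
static void dgemm_gpu_usage_sketch()
{
    const int M = 2, N = 3, K = 4;
    double A[M * K], B[K * N], C[M * N];
    for (int i = 0; i < M * K; i++) A[i] = 1.0;   // arbitrary test data
    for (int i = 0; i < K * N; i++) B[i] = 2.0;

    double *dA, *dB, *dC;
    hipMalloc((void**)&dA, M * K * sizeof(double));
    hipMalloc((void**)&dB, K * N * sizeof(double));
    hipMalloc((void**)&dC, M * N * sizeof(double));
    hipMemcpy(dA, A, M * K * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(dB, B, K * N * sizeof(double), hipMemcpyHostToDevice);

    dgemm_gpu(dA, dB, dC, M, N, K);               // C = A * B on the device

    hipMemcpy(C, dC, M * N * sizeof(double), hipMemcpyDeviceToHost);
    // With this data every entry of C should equal K * 1.0 * 2.0 = 8.0.
    hipFree(dA); hipFree(dB); hipFree(dC);
}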
efb687c038ef8f3bfafa7bd0158d71ed4094edc2.cu
#include "cuda_utils.h" #include "linalg.h" #include <stdio.h> #include "cublas_v2.h" #ifdef CUDA extern cublasHandle_t cublas_handle; #endif __global__ void dgemm_naive(const double *A, const double *B, double *C, const int M, const int N, const int K) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (row < M && col < N) { double sum = 0.; for (int k = 0; k < K; k++) sum += A[row * K + k] * B[k * N + col]; C[row * N + col] = sum; } } __global__ void dgemm_ta_naive(const double *A, const double *B, double *C, const int M, const int N, const int K) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (row < M && col < N) { double sum = 0; for (int k = 0; k < K; k++) sum += A[k * M + row] * B[k * N + col]; C[row * N + col] = sum; } } __global__ void dgemm_tb_naive(const double *A, const double *B, const double *C, double *D, const int M, const int N, const int K) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if (row < M && col < N) { double sum = 0; for (int k = 0; k < K; k++) sum += A[row * K + k] * B[col * K + k]; D[row * N + col] = sum + C[row * N + col]; } } // Optimized matrix multiply kernels using shared memory. // Computes C = A*B, where A is a M by K matrix, B is a K by N matrix, C is a M by N matrix. __global__ void dgemm_optimized(const double *A, const double *B, double *C, const int M, const int N, const int K) { double CValue = 0; extern __shared__ double shared[]; int tx = threadIdx.x; int ty = threadIdx.y; int dim = blockDim.x; int Row = blockIdx.y * dim + ty; int Col = blockIdx.x * dim + tx; double* As = (double*)shared; double* Bs = (double*)&shared[dim*dim]; int ARows = M; int ACols = K; int BRows = K; int BCols = N; int CRows = M; int CCols = N; for (int k = 0; k < (dim + ACols -1)/dim; k++) { if (k*dim + tx < ACols && Row < ARows) As[ty * dim + tx] = A[Row * ACols + k * dim+ tx]; else As[ty * dim + tx] = 0.0; if (k * dim + ty < BRows && Col < BCols) Bs[ty * dim + tx] = B[(k * dim * BCols) + (ty * BCols) + Col]; else Bs[ty * dim + tx] = 0.0; __syncthreads(); for (int n = 0; n < dim; n++) { CValue += As[ty * dim + n] * Bs[n * dim + tx]; } __syncthreads(); } if (Row < CRows && Col < CCols) { C[Row*CCols + Col] = CValue; } } // Computes C = A'*B, where A is a K by M matrix, B is a K by N matrix, C is a M by N matrix. 
__global__ void dgemm_ta_optimized(const double *A, const double *B, double *C, const int M, const int N, const int K) { double CValue = 0; int tx = threadIdx.x; int ty = threadIdx.y; int dim = blockDim.x; int Row = blockIdx.y * dim + ty; int Col = blockIdx.x * dim + tx; extern __shared__ double shared[]; double* As = (double*)shared; double* Bs = (double*)&shared[dim*dim]; int ARows = M; int ACols = K; int BRows = K; int BCols = N; int CRows = M; int CCols = N; for (int k = 0; k < (dim + ACols -1)/dim; k++) { if (k * dim + tx < ACols && Row < ARows) As[ty * dim + tx] = A[(k * dim + tx)*ARows + Row]; else As[ty * dim + tx] = 0.0; if (k * dim + ty < BRows && Col < BCols) Bs[ty * dim + tx] = B[(k * dim + ty)*BCols + Col]; else Bs[ty * dim + tx] = 0.0; __syncthreads(); for (int n = 0; n < dim; n++) CValue += As[ty * dim + n] * Bs[n * dim + tx]; __syncthreads(); } if (Row < CRows && Col < CCols) { C[Row*CCols + Col] = CValue; } } __global__ void dgemm_tb_optimized(const double *A, const double *B, const double *C, double *D, const size_t M, const size_t K, const size_t N) { double DValue = 0; int tx = threadIdx.x; int ty = threadIdx.y; int dim = blockDim.x; int Row = blockIdx.y * dim + ty; int Col = blockIdx.x * dim + tx; extern __shared__ double shared[]; double* As = (double*)shared; double* Bs = (double*)&shared[dim*dim]; int ARows = M; int ACols = K; int BRows = K; int BCols = N; int CRows = M; int CCols = N; for (int k = 0; k < (dim + ACols -1)/dim; k++) { if (k*dim + tx < ACols && Row < ARows) As[ty * dim + tx] = A[Row*ACols + k*dim + tx]; else As[ty * dim + tx] = 0.0; if (k*dim + ty < BRows && Col < BCols) Bs[ty * dim + tx] = B[k*dim + Col*BRows + ty]; else Bs[ty * dim + tx] = 0.0; __syncthreads(); for (int n = 0; n < dim; n++) DValue += As[ty * dim + n] * Bs[n * dim + tx]; __syncthreads(); } if (Row < CRows && Col < CCols) { D[Row*CCols + Col] = DValue + C[Row*CCols + Col]; } } // Computes C = A*B, where A is a M by K matrix, B is a K by N matrix, C is a M by N matrix. // Matrices are stored in row-major order. void dgemm_gpu(const double *A, const double *B, double *C, const int M, const int N, const int K) { #ifndef CUBLAS int BLOCK_SIZE = 16; #if defined(_GPU_GEMM_NAIVE) dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (M + BLOCK_SIZE - 1) / BLOCK_SIZE); dgemm_naive<<<grid, block>>>(A, B, C, M, N, K); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaDeviceSynchronize()); #elif defined(_GPU_GEMM_OPT) int TILE_DIM = 16; dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid((N + dimBlock.x - 1)/dimBlock.x, (M + dimBlock.y - 1)/dimBlock.y); size_t shmem_size = 2 * TILE_DIM * TILE_DIM * sizeof(double); dgemm_optimized<<<dimGrid, dimBlock, shmem_size>>>(A, B, C, M, N, K); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaDeviceSynchronize()); #endif #else // Matrices are stored in row-major order, but cuBLAS assumes column-major // order. We want to compute: // A * B = (A^T)^T * (B^T)^T = A'^T * B'^T = (B' * A')^T const double alpha(1.0); const double beta(0.0); cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, N, M, K, &alpha, B, N, A, K, &beta, C, N); #endif } // Computes C = A'*B, where A is a K by M matrix, B is a K by N matrix, C is a M by N matrix. // Matrices are stored in row-major order. 
void dgemm_ta_gpu(const double *A, const double *B, double *C, const int M, const int N, const int K) { #ifndef CUBLAS int BLOCK_SIZE = 16; #if defined(_GPU_GEMM_NAIVE) dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (M + BLOCK_SIZE - 1) / BLOCK_SIZE); dgemm_ta_naive<<<grid, block>>>(A, B, C, M, N, K); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaDeviceSynchronize()); #elif defined(_GPU_GEMM_OPT) int TILE_DIM = 16; dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid((N + dimBlock.x - 1)/dimBlock.x, (M + dimBlock.y - 1)/dimBlock.y); size_t shmem_size = 2 * TILE_DIM * TILE_DIM * sizeof(double); dgemm_ta_optimized<<<dimGrid, dimBlock, shmem_size>>>(A, B, C, M, N, K); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaDeviceSynchronize()); #endif #else // Matrices are stored in row-major order, but cuBLAS assumes column-major // order. We want to compute: // A^T * B = A^T * (B^T)^T = A' * B'^T = (B'*A'^T)^T // M is KxM, B is KxN, C is MxN. /* * FILLME: Use cublasDgemm() */ const double alpha(1.0); const double beta(0.0); cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, N, M, K, &alpha, B, N, A, M, &beta, C, N); #endif } // Computes D = A*B'+C, where A is a M by K matrix, B is a N by K matrix, C and D are M by N matrices. // Matrices are stored in row-major order. void dgemm_tb_gpu(const double *A, const double *B, const double *C, double *D, const int M, const int N, const int K) { #ifndef CUBLAS int BLOCK_SIZE = 16; #if defined(_GPU_GEMM_NAIVE) dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (M + BLOCK_SIZE - 1) / BLOCK_SIZE); dgemm_tb_naive<<<grid, block>>>(A, B, C, D, M, N, K); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaDeviceSynchronize()); #elif defined(_GPU_GEMM_OPT) int TILE_DIM = 16; dim3 dimBlock(TILE_DIM, TILE_DIM, 1); dim3 dimGrid((N + dimBlock.x - 1)/dimBlock.x, (M + dimBlock.y - 1)/dimBlock.y); size_t shmem_size = 2 * TILE_DIM * TILE_DIM * sizeof(double); dgemm_tb_optimized<<<dimGrid, dimBlock, shmem_size>>>(A, B, C, D, M, K, N); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaDeviceSynchronize()); #endif #else // D = A * B^T // Matrices are stored in row-major order, but cuBLAS assumes column-major // order. We want to compute: // C = A * B^T = (A^T)^T * B^T = A'^T * B' = (B'^T * A')^T // A is MxK, B is NxK, C is MxN, D is MxN const double alpha(1.0); const double beta(0.0); // D = A * B' cublasDgemm(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, N, M, K, &alpha, B, K, A, K, &beta, D, N); // D = C + D cublasDgeam(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, N, M, &alpha, D, N, &alpha, C, N, D, N); #endif }
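// --------------------------------------------------------------------------
// Editor's reference sketch (never called): a plain CPU version of the
// D = A*B' + C operation computed by dgemm_tb_gpu(), handy for validating the
// kernels above on small inputs. All matrices are row-major: A is MxK,
// B is NxK, C and D are MxN.
static void dgemm_tb_cpu_reference(const double *A, const double *B,
                                   const double *C, double *D,
                                   const int M, const int N, const int K)
{
    for (int row = 0; row < M; row++)
        for (int col = 0; col < N; col++)
        {
            double sum = 0.0;
            for (int k = 0; k < K; k++)
                sum += A[row * K + k] * B[col * K + k];   // B read as B'
            D[row * N + col] = sum + C[row * N + col];
        }
}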
ed2c8b5c72af71e8c48f67e2e5af9fc54b0cafa2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "internal.h" #include "device.h" #include <limits> #include <pcl/gpu/utils/device/algorithm.hpp> #include <pcl/gpu/utils/device/warp.hpp> //#include <pcl/gpu/utils/device/funcattrib.hpp> #include <pcl/gpu/utils/safe_call.hpp> #include <thrust/tuple.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include "thrust/device_ptr.h" #include <thrust/transform.h> #include <thrust/sort.h> #include <thrust/transform_scan.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/unique.h> #include <thrust/gather.h> using namespace thrust; namespace pcl { namespace device { template<bool use_max> struct IndOp { __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<float, int>& e1, const thrust::tuple<float, int>& e2) const { thrust::tuple<float, int> res; if (use_max) res.get<0>() = fmax(e1.get<0>(), e2.get<0>()); else res.get<0>() = fmin(e1.get<0>(), e2.get<0>()); res.get<1>() = (res.get<0>() == e1.get<0>()) ? 
e1.get<1>() : e2.get<1>(); return res; } }; struct X { __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<PointType, int>& in) const { return thrust::tuple<float, int>(in.get<0>().x, in.get<1>()); } }; struct Y { __device__ __forceinline__ float operator()(const PointType& in) const { return in.y; } }; struct Z { __device__ __forceinline__ float operator()(const PointType& in) const { return in.z; } }; struct LineDist { float3 x1, x2; LineDist(const PointType& p1, const PointType& p2) : x1(tr(p1)), x2(tr(p2)) {} __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<PointType, int>& in) const { float3 x0 = tr(in.get<0>()); float dist = norm(cross(x0 - x1, x0 - x2))/norm(x1 - x2); return thrust::tuple<float, int>(dist, in.get<1>()); } }; struct PlaneDist { float3 x1, n; PlaneDist(const PointType& p1, const PointType& p2, const PointType& p3) : x1(tr(p1)) { float3 x2 = tr(p2), x3 = tr(p3); n = normalized(cross(x2 - x1, x3 - x1)); } __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<PointType, int>& in) const { float3 x0 = tr(in.get<0>()); float dist = std::abs(dot(n, x0 - x1)); return thrust::tuple<float, int>(dist, in.get<1>()); } }; template<typename It, typename Unary, typename Init, typename Binary> int transform_reduce_index(It beg, It end, Unary unop, Init init, Binary binary) { counting_iterator<int> cbeg(0); counting_iterator<int> cend = cbeg + thrust::distance(beg, end); thrust::tuple<float, int> t = transform_reduce( make_zip_iterator(thrust::make_tuple(beg, cbeg)), make_zip_iterator(thrust::make_tuple(end, cend)), unop, init, binary); return t.get<1>(); } template<typename It, typename Unary> int transform_reduce_min_index(It beg, It end, Unary unop) { thrust::tuple<float, int> min_tuple(std::numeric_limits<float>::max(), 0); return transform_reduce_index(beg, end, unop, min_tuple, IndOp<false>()); } template<typename It, typename Unary> int transform_reduce_max_index(It beg, It end, Unary unop) { thrust::tuple<float, int> max_tuple(std::numeric_limits<float>::min(), 0); return transform_reduce_index(beg, end, unop, max_tuple, IndOp<true>()); } } } pcl::device::PointStream::PointStream(const Cloud& cloud_) : cloud(cloud_) { cloud_size = cloud.size(); facets_dists.create(cloud_size); perm.create(cloud_size); device_ptr<int> pbeg(perm.ptr()); thrust::sequence(pbeg, pbeg + cloud_size); } void pcl::device::PointStream::computeInitalSimplex() { device_ptr<const PointType> beg(cloud.ptr()); device_ptr<const PointType> end = beg + cloud_size; int minx = transform_reduce_min_index(beg, end, X()); int maxx = transform_reduce_max_index(beg, end, X()); PointType p1 = *(beg + minx); PointType p2 = *(beg + maxx); int maxl = transform_reduce_max_index(beg, end, LineDist(p1, p2)); PointType p3 = *(beg + maxl); int maxp = transform_reduce_max_index(beg, end, PlaneDist(p1, p2, p3)); PointType p4 = *(beg + maxp); simplex.x1 = tr(p1); simplex.x2 = tr(p2); simplex.x3 = tr(p3); simplex.x4 = tr(p4); simplex.i1 = minx; simplex.i2 = maxx; simplex.i3 = maxl; simplex.i4 = maxp; float maxy = transform_reduce(beg, end, Y(), std::numeric_limits<float>::min(), maximum<float>()); float miny = transform_reduce(beg, end, Y(), std::numeric_limits<float>::max(), minimum<float>()); float maxz = transform_reduce(beg, end, Z(), std::numeric_limits<float>::min(), maximum<float>()); float minz = transform_reduce(beg, end, Z(), std::numeric_limits<float>::max(), minimum<float>()); float dx = (p2.x - p1.x); float dy = (maxy - 
miny); float dz = (maxz - minz); cloud_diag = sqrt(dx*dx + dy*dy + dz*dz); simplex.p1 = compute_plane(simplex.x4, simplex.x2, simplex.x3, simplex.x1); simplex.p2 = compute_plane(simplex.x3, simplex.x1, simplex.x4, simplex.x2); simplex.p3 = compute_plane(simplex.x2, simplex.x1, simplex.x4, simplex.x3); simplex.p4 = compute_plane(simplex.x1, simplex.x2, simplex.x3, simplex.x4); } namespace pcl { namespace device { __global__ void init_fs(int i1, int i2, int i3, int i4, PtrStep<int> verts_inds) { *(int4*)verts_inds.ptr(0) = make_int4(i2, i1, i1, i1); *(int4*)verts_inds.ptr(1) = make_int4(i3, i3, i2, i2); *(int4*)verts_inds.ptr(2) = make_int4(i4, i4, i4, i3); } } } void pcl::device::FacetStream::setInitialFacets(const InitalSimplex& s) { hipLaunchKernelGGL(( init_fs), dim3(1), dim3(1), 0, 0, s.i1, s.i2, s.i3, s.i4, verts_inds); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); facet_count = 4; } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { struct InitalClassify { float diag; float4 pl1, pl2, pl3, pl4; InitalClassify(const float4& p1, const float4& p2, const float4& p3, const float4& p4, float diagonal) : diag(diagonal), pl1(p1), pl2(p2), pl3(p3), pl4(p4) { pl1 *= compue_inv_normal_norm(pl1); pl2 *= compue_inv_normal_norm(pl2); pl3 *= compue_inv_normal_norm(pl3); pl4 *= compue_inv_normal_norm(pl4); } __device__ __forceinline__ std::uint64_t operator()(const PointType& p) const { float4 x = p; x.w = 1; float d0 = dot(pl1, x); float d1 = dot(pl2, x); float d2 = dot(pl3, x); float d3 = dot(pl4, x); float dists[] = { d0, d1, d2, d3 }; int negs_inds[4]; int neg_count = 0; int idx = std::numeric_limits<int>::max(); float dist = 0; #pragma unroll for(int i = 0; i < 4; ++i) if (dists[i] < 0) negs_inds[neg_count++] = i; if (neg_count == 3) { int i1 = negs_inds[1]; int i2 = negs_inds[2]; int ir = std::abs(dists[i1]) < std::abs(dists[i2]) ? i2 : i1; negs_inds[1] = ir; --neg_count; } if (neg_count == 2) { int i1 = negs_inds[0]; int i2 = negs_inds[1]; int ir = std::abs(dists[i1]) < std::abs(dists[i2]) ? i2 : i1; negs_inds[0] = ir; --neg_count; } if (neg_count == 1) { idx = negs_inds[0]; dist = diag - std::abs(dists[idx]); // to ensure that sorting order is inverse, i.e. 
distant points go first } //if (neg_count == 0) // then internal point ==>> idx = std::numeric_limits<int>::max() std::uint64_t res = idx; res <<= 32; return res + *reinterpret_cast<unsigned int*>(&dist); } }; __global__ void initalClassifyKernel(const InitalClassify ic, const PointType* points, int cloud_size, std::uint64_t* output) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < cloud_size) output[index] = ic(points[index]); } } } void pcl::device::PointStream::initalClassify() { //thrust::device_ptr<const PointType> beg(cloud.ptr()); //thrust::device_ptr<const PointType> end = beg + cloud_size; thrust::device_ptr<std::uint64_t> out(facets_dists.ptr()); InitalClassify ic(simplex.p1, simplex.p2, simplex.p3, simplex.p4, cloud_diag); //thrust::transform(beg, end, out, ic); //printFuncAttrib(initalClassifyKernel); hipLaunchKernelGGL(( initalClassifyKernel), dim3(divUp(cloud_size, 256)), dim3(256), 0, 0, ic, cloud, cloud_size, facets_dists); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); thrust::device_ptr<int> pbeg(perm.ptr()); thrust::sort_by_key(out, out + cloud_size, pbeg); } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { __device__ int new_cloud_size; struct SearchFacetHeads { std::uint64_t *facets_dists; int cloud_size; int facet_count; int *perm; const PointType* points; mutable int* head_points; //bool logger; __device__ __forceinline__ void operator()(int facet) const { const std::uint64_t* b = facets_dists; const std::uint64_t* e = b + cloud_size; bool last_thread = facet == facet_count; int search_value = !last_thread ? facet : std::numeric_limits<int>::max(); int index = lower_bound(b, e, search_value, LessThanByFacet()) - b; if (last_thread) new_cloud_size = index; else { bool not_found = index == cloud_size || (facet != (facets_dists[index] >> 32)); head_points[facet] = not_found ? -1 : perm[index]; } } }; __global__ void searchFacetHeadsKernel(const SearchFacetHeads sfh) { int facet = threadIdx.x + blockDim.x * blockIdx.x; if (facet <= sfh.facet_count) sfh(facet); } } } int pcl::device::PointStream::searchFacetHeads(std::size_t facet_count, DeviceArray<int>& head_points) { SearchFacetHeads sfh; sfh.facets_dists = facets_dists; sfh.cloud_size = (int)cloud_size; sfh.facet_count = (int)facet_count; sfh.perm = perm; sfh.points = cloud.ptr(); sfh.head_points = head_points; //thrust::counting_iterator<int> b(0); //thrust::counting_iterator<int> e = b + facet_count + 1; //thrust::for_each(b, e, sfh); hipLaunchKernelGGL(( searchFacetHeadsKernel), dim3(divUp(facet_count+1, 256)), dim3(256), 0, 0, sfh); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int new_size; cudaSafeCall( hipMemcpyFromSymbol( (void*)&new_size, pcl::device::new_cloud_size, sizeof(new_size)) ); return new_size; } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { struct NotMinus1 { __device__ __forceinline__ int operator()(const int& v) const { return (v == -1) ? 
0 : 1; } }; struct Compaction { enum { CTA_SIZE = 256, WARPS = CTA_SIZE/ Warp::WARP_SIZE }; int* head_points_in; PtrStep<int> verts_inds_in; int *scan_buffer; int facet_count; mutable int* head_points_out; mutable PtrStep<int> verts_inds_out; mutable PtrStep<int> empty_facets; mutable int *empty_count; __device__ __forceinline__ void operator()() const { int idx = threadIdx.x + blockIdx.x * blockDim.x; #if CUDART_VERSION >= 9000 if (__all_sync (__activemask (), idx >= facet_count)) return; #else if (__all (idx >= facet_count)) return; #endif int empty = 0; if(idx < facet_count) { int head_idx = head_points_in[idx]; if (head_idx != -1) { int offset = scan_buffer[idx]; head_points_out[offset] = head_idx; verts_inds_out.ptr(0)[offset] = verts_inds_in.ptr(0)[idx]; verts_inds_out.ptr(1)[offset] = verts_inds_in.ptr(1)[idx]; verts_inds_out.ptr(2)[offset] = verts_inds_in.ptr(2)[idx]; } else empty = 1; } #if CUDART_VERSION >= 9000 int total = __popc (__ballot_sync (__activemask (), empty)); #else int total = __popc (__ballot (empty)); #endif if (total > 0) { #if CUDART_VERSION >= 9000 int offset = Warp::binaryExclScan (__ballot_sync (__activemask (), empty)); #else int offset = Warp::binaryExclScan (__ballot (empty)); #endif volatile __shared__ int wapr_buffer[WARPS]; int laneid = Warp::laneId(); int warpid = Warp::id(); if (laneid == 0) { int old = atomicAdd(empty_count, total); wapr_buffer[warpid] = old; } int old = wapr_buffer[warpid]; if (empty) { empty_facets.ptr(0)[old + offset] = verts_inds_in.ptr(0)[idx]; empty_facets.ptr(1)[old + offset] = verts_inds_in.ptr(1)[idx]; empty_facets.ptr(2)[old + offset] = verts_inds_in.ptr(2)[idx]; int a1 = verts_inds_in.ptr(0)[idx], a2 = verts_inds_in.ptr(1)[idx], a3 = verts_inds_in.ptr(2)[idx]; } } } }; __global__ void compactionKernel( const Compaction c ) { c(); } } } void pcl::device::FacetStream::compactFacets() { int old_empty_count; empty_count.download(&old_empty_count); thrust::device_ptr<int> b(head_points.ptr()); thrust::device_ptr<int> e = b + facet_count; thrust::device_ptr<int> o(scan_buffer.ptr()); thrust::transform_exclusive_scan(b, e, o, NotMinus1(), 0, thrust::plus<int>()); Compaction c; c.verts_inds_in = verts_inds; c.head_points_in = head_points; c.scan_buffer = scan_buffer; c.facet_count = facet_count; c.head_points_out = head_points2; c.verts_inds_out = verts_inds2; c.empty_facets = empty_facets; c.empty_count = empty_count; int block = Compaction::CTA_SIZE; int grid = divUp(facet_count, block); hipLaunchKernelGGL(( compactionKernel), dim3(grid), dim3(block), 0, 0, c); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); verts_inds.swap(verts_inds2); head_points.swap(head_points2); int new_empty_count; empty_count.download(&new_empty_count); facet_count -= new_empty_count - old_empty_count; } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { struct Classify { std::uint64_t* facets_dists; int* scan_buffer; int* head_points; int* perm; PtrStep<int> verts_inds; const PointType *points; float diag; int facet_count; __device__ __forceinline__ void operator()(int point_idx) const { int perm_index = perm[point_idx]; int facet = facets_dists[point_idx] >> 32; facet = scan_buffer[facet]; int hi = head_points[facet]; if (hi == perm_index) { std::uint64_t res = std::numeric_limits<int>::max(); res <<= 32; facets_dists[point_idx] = res; } else { int i1 = 
verts_inds.ptr(0)[facet]; int i2 = verts_inds.ptr(1)[facet]; int i3 = verts_inds.ptr(2)[facet]; float3 hp = tr( points[ hi ] ); float3 v1 = tr( points[ i1 ] ); float3 v2 = tr( points[ i2 ] ); float3 v3 = tr( points[ i3 ] ); float4 p0 = compute_plane(hp, v1, v2, /*opposite*/v3); // j float4 p1 = compute_plane(hp, v2, v3, /*opposite*/v1); // facet_count + j float4 p2 = compute_plane(hp, v3, v1, /*opposite*/v2); // facet_count + j*2 p0 *= compue_inv_normal_norm(p0); p1 *= compue_inv_normal_norm(p1); p2 *= compue_inv_normal_norm(p2); float4 p = points[perm_index]; p.w = 1; float d0 = dot(p, p0); float d1 = dot(p, p1); float d2 = dot(p, p2); float dists[] = { d0, d1, d2 }; int negs_inds[3]; int neg_count = 0; int new_idx = std::numeric_limits<int>::max(); float dist = 0; int indeces[] = { facet, facet + facet_count, facet + facet_count * 2 }; #pragma unroll for(int i = 0; i < 3; ++i) if (dists[i] < 0) negs_inds[neg_count++] = i; if (neg_count == 3) { int i1 = negs_inds[1]; int i2 = negs_inds[2]; int ir = std::abs(dists[i1]) < std::abs(dists[i2]) ? i2 : i1; negs_inds[1] = ir; --neg_count; } if (neg_count == 2) { int i1 = negs_inds[0]; int i2 = negs_inds[1]; int ir = std::abs(dists[i1]) < std::abs(dists[i2]) ? i2 : i1; negs_inds[0] = ir; --neg_count; } if (neg_count == 1) { new_idx = negs_inds[0]; dist = diag - std::abs(dists[new_idx]); // to ensure that sorting order is inverse, i.e. distant points go first new_idx = indeces[new_idx]; } // if (neg_count == 0) // new_idx = std::numeric_limits<int>::max() ==>> internal point std::uint64_t res = new_idx; res <<= 32; res += *reinterpret_cast<unsigned int*>(&dist); facets_dists[point_idx] = res; } /* if (hi == perm_index) */ } }; __global__ void classifyKernel(const Classify c, int cloud_size) { int point_idx = threadIdx.x + blockIdx.x * blockDim.x; if ( point_idx < cloud_size ) c(point_idx); } } } void pcl::device::PointStream::classify(FacetStream& fs) { Classify c; c.facets_dists = facets_dists; c.scan_buffer = fs.scan_buffer; c.head_points = fs.head_points; c.perm = perm; c.verts_inds = fs.verts_inds; c.points = cloud; c.diag = cloud_diag; c.facet_count = fs.facet_count; //thrust::counting_iterator<int> b(0); //thrust::for_each(b, b + cloud_size, c); hipLaunchKernelGGL(( classifyKernel), dim3(divUp(cloud_size, 256)), dim3(256), 0, 0, c, cloud_size); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); thrust::device_ptr<std::uint64_t> beg(facets_dists.ptr()); thrust::device_ptr<std::uint64_t> end = beg + cloud_size; thrust::device_ptr<int> pbeg(perm.ptr()); thrust::sort_by_key(beg, end, pbeg); } namespace pcl { namespace device { struct SplitFacets { int* head_points; int facet_count; mutable PtrStep<int> verts_inds; __device__ __forceinline__ void operator()(int facet) const { int hi = head_points[facet]; int i1 = verts_inds.ptr(0)[facet]; int i2 = verts_inds.ptr(1)[facet]; int i3 = verts_inds.ptr(2)[facet]; make_facet(hi, i1, i2, facet); make_facet(hi, i2, i3, facet + facet_count); make_facet(hi, i3, i1, facet + facet_count * 2); } __device__ __forceinline__ void make_facet(int i1, int i2, int i3, int out_idx) const { verts_inds.ptr(0)[out_idx] = i1; verts_inds.ptr(1)[out_idx] = i2; verts_inds.ptr(2)[out_idx] = i3; } }; __global__ void splitFacetsKernel(const SplitFacets sf) { int facet = threadIdx.x + blockIdx.x * blockDim.x; if (facet < sf.facet_count) sf(facet); } } } void pcl::device::FacetStream::splitFacets() { SplitFacets sf; sf.head_points = head_points; sf.verts_inds = verts_inds; sf.facet_count = facet_count; 
//thrust::counting_iterator<int> b(0); //thrust::for_each(b, b + facet_count, sf); hipLaunchKernelGGL(( splitFacetsKernel), dim3(divUp(facet_count, 256)), dim3(256), 0, 0, sf); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); facet_count *= 3; } size_t pcl::device::remove_duplicates(DeviceArray<int>& indeces) { thrust::device_ptr<int> beg(indeces.ptr()); thrust::device_ptr<int> end = beg + indeces.size(); thrust::sort(beg, end); return (std::size_t)(thrust::unique(beg, end) - beg); } namespace pcl { namespace device { __global__ void gatherKernel(const PtrSz<int> indeces, const PointType* src, PointType* dst) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < indeces.size) dst[idx] = src[indeces.data[idx]]; } } } void pcl::device::pack_hull(const DeviceArray<PointType>& points, const DeviceArray<int>& indeces, DeviceArray<PointType>& output) { output.create(indeces.size()); //device_ptr<const PointType> in(points.ptr()); //thrust::device_ptr<const int> mb(indeces.ptr()); //thrust::device_ptr<const int> me = mb + indeces.size(); //device_ptr<PointType> out(output.ptr()); //thrust::gather(mb, me, in, out); hipLaunchKernelGGL(( gatherKernel), dim3(divUp(indeces.size(), 256)), dim3(256), 0, 0, indeces, points, output); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); }
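#include <thrust/host_vector.h>
// --------------------------------------------------------------------------
// Editor's host-side sketch (never called, not part of the PCL build):
// remove_duplicates() and pack_hull() above boil down to sort + unique over
// the hull vertex indices followed by a gather of the matching points. The
// data below and the use of float in place of PointType are illustrative only.
static void hull_packing_sketch()
{
    const int raw_inds[] = {3, 1, 3, 0, 1};          // vertex indices with duplicates
    const float points[] = {10.f, 11.f, 12.f, 13.f}; // stand-in point "cloud"

    thrust::host_vector<int> inds(raw_inds, raw_inds + 5);
    thrust::sort(inds.begin(), inds.end());          // -> 0 1 1 3 3
    int n = (int)(thrust::unique(inds.begin(), inds.end()) - inds.begin()); // -> 0 1 3, n == 3

    thrust::host_vector<float> hull(n);
    thrust::gather(inds.begin(), inds.begin() + n,   // which points to keep
                   points,                           // source array
                   hull.begin());                    // -> 10 11 13
}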
ed2c8b5c72af71e8c48f67e2e5af9fc54b0cafa2.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "internal.h" #include "device.h" #include <limits> #include <pcl/gpu/utils/device/algorithm.hpp> #include <pcl/gpu/utils/device/warp.hpp> //#include <pcl/gpu/utils/device/funcattrib.hpp> #include <pcl/gpu/utils/safe_call.hpp> #include <thrust/tuple.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/sequence.h> #include "thrust/device_ptr.h" #include <thrust/transform.h> #include <thrust/sort.h> #include <thrust/transform_scan.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/unique.h> #include <thrust/gather.h> using namespace thrust; namespace pcl { namespace device { template<bool use_max> struct IndOp { __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<float, int>& e1, const thrust::tuple<float, int>& e2) const { thrust::tuple<float, int> res; if (use_max) res.get<0>() = fmax(e1.get<0>(), e2.get<0>()); else res.get<0>() = fmin(e1.get<0>(), e2.get<0>()); res.get<1>() = (res.get<0>() == e1.get<0>()) ? 
e1.get<1>() : e2.get<1>(); return res; } }; struct X { __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<PointType, int>& in) const { return thrust::tuple<float, int>(in.get<0>().x, in.get<1>()); } }; struct Y { __device__ __forceinline__ float operator()(const PointType& in) const { return in.y; } }; struct Z { __device__ __forceinline__ float operator()(const PointType& in) const { return in.z; } }; struct LineDist { float3 x1, x2; LineDist(const PointType& p1, const PointType& p2) : x1(tr(p1)), x2(tr(p2)) {} __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<PointType, int>& in) const { float3 x0 = tr(in.get<0>()); float dist = norm(cross(x0 - x1, x0 - x2))/norm(x1 - x2); return thrust::tuple<float, int>(dist, in.get<1>()); } }; struct PlaneDist { float3 x1, n; PlaneDist(const PointType& p1, const PointType& p2, const PointType& p3) : x1(tr(p1)) { float3 x2 = tr(p2), x3 = tr(p3); n = normalized(cross(x2 - x1, x3 - x1)); } __device__ __forceinline__ thrust::tuple<float, int> operator()(const thrust::tuple<PointType, int>& in) const { float3 x0 = tr(in.get<0>()); float dist = std::abs(dot(n, x0 - x1)); return thrust::tuple<float, int>(dist, in.get<1>()); } }; template<typename It, typename Unary, typename Init, typename Binary> int transform_reduce_index(It beg, It end, Unary unop, Init init, Binary binary) { counting_iterator<int> cbeg(0); counting_iterator<int> cend = cbeg + thrust::distance(beg, end); thrust::tuple<float, int> t = transform_reduce( make_zip_iterator(thrust::make_tuple(beg, cbeg)), make_zip_iterator(thrust::make_tuple(end, cend)), unop, init, binary); return t.get<1>(); } template<typename It, typename Unary> int transform_reduce_min_index(It beg, It end, Unary unop) { thrust::tuple<float, int> min_tuple(std::numeric_limits<float>::max(), 0); return transform_reduce_index(beg, end, unop, min_tuple, IndOp<false>()); } template<typename It, typename Unary> int transform_reduce_max_index(It beg, It end, Unary unop) { thrust::tuple<float, int> max_tuple(std::numeric_limits<float>::min(), 0); return transform_reduce_index(beg, end, unop, max_tuple, IndOp<true>()); } } } pcl::device::PointStream::PointStream(const Cloud& cloud_) : cloud(cloud_) { cloud_size = cloud.size(); facets_dists.create(cloud_size); perm.create(cloud_size); device_ptr<int> pbeg(perm.ptr()); thrust::sequence(pbeg, pbeg + cloud_size); } void pcl::device::PointStream::computeInitalSimplex() { device_ptr<const PointType> beg(cloud.ptr()); device_ptr<const PointType> end = beg + cloud_size; int minx = transform_reduce_min_index(beg, end, X()); int maxx = transform_reduce_max_index(beg, end, X()); PointType p1 = *(beg + minx); PointType p2 = *(beg + maxx); int maxl = transform_reduce_max_index(beg, end, LineDist(p1, p2)); PointType p3 = *(beg + maxl); int maxp = transform_reduce_max_index(beg, end, PlaneDist(p1, p2, p3)); PointType p4 = *(beg + maxp); simplex.x1 = tr(p1); simplex.x2 = tr(p2); simplex.x3 = tr(p3); simplex.x4 = tr(p4); simplex.i1 = minx; simplex.i2 = maxx; simplex.i3 = maxl; simplex.i4 = maxp; float maxy = transform_reduce(beg, end, Y(), std::numeric_limits<float>::min(), maximum<float>()); float miny = transform_reduce(beg, end, Y(), std::numeric_limits<float>::max(), minimum<float>()); float maxz = transform_reduce(beg, end, Z(), std::numeric_limits<float>::min(), maximum<float>()); float minz = transform_reduce(beg, end, Z(), std::numeric_limits<float>::max(), minimum<float>()); float dx = (p2.x - p1.x); float dy = (maxy - 
miny); float dz = (maxz - minz); cloud_diag = sqrt(dx*dx + dy*dy + dz*dz); simplex.p1 = compute_plane(simplex.x4, simplex.x2, simplex.x3, simplex.x1); simplex.p2 = compute_plane(simplex.x3, simplex.x1, simplex.x4, simplex.x2); simplex.p3 = compute_plane(simplex.x2, simplex.x1, simplex.x4, simplex.x3); simplex.p4 = compute_plane(simplex.x1, simplex.x2, simplex.x3, simplex.x4); } namespace pcl { namespace device { __global__ void init_fs(int i1, int i2, int i3, int i4, PtrStep<int> verts_inds) { *(int4*)verts_inds.ptr(0) = make_int4(i2, i1, i1, i1); *(int4*)verts_inds.ptr(1) = make_int4(i3, i3, i2, i2); *(int4*)verts_inds.ptr(2) = make_int4(i4, i4, i4, i3); } } } void pcl::device::FacetStream::setInitialFacets(const InitalSimplex& s) { init_fs<<<1, 1>>>(s.i1, s.i2, s.i3, s.i4, verts_inds); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); facet_count = 4; } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { struct InitalClassify { float diag; float4 pl1, pl2, pl3, pl4; InitalClassify(const float4& p1, const float4& p2, const float4& p3, const float4& p4, float diagonal) : diag(diagonal), pl1(p1), pl2(p2), pl3(p3), pl4(p4) { pl1 *= compue_inv_normal_norm(pl1); pl2 *= compue_inv_normal_norm(pl2); pl3 *= compue_inv_normal_norm(pl3); pl4 *= compue_inv_normal_norm(pl4); } __device__ __forceinline__ std::uint64_t operator()(const PointType& p) const { float4 x = p; x.w = 1; float d0 = dot(pl1, x); float d1 = dot(pl2, x); float d2 = dot(pl3, x); float d3 = dot(pl4, x); float dists[] = { d0, d1, d2, d3 }; int negs_inds[4]; int neg_count = 0; int idx = std::numeric_limits<int>::max(); float dist = 0; #pragma unroll for(int i = 0; i < 4; ++i) if (dists[i] < 0) negs_inds[neg_count++] = i; if (neg_count == 3) { int i1 = negs_inds[1]; int i2 = negs_inds[2]; int ir = std::abs(dists[i1]) < std::abs(dists[i2]) ? i2 : i1; negs_inds[1] = ir; --neg_count; } if (neg_count == 2) { int i1 = negs_inds[0]; int i2 = negs_inds[1]; int ir = std::abs(dists[i1]) < std::abs(dists[i2]) ? i2 : i1; negs_inds[0] = ir; --neg_count; } if (neg_count == 1) { idx = negs_inds[0]; dist = diag - std::abs(dists[idx]); // to ensure that sorting order is inverse, i.e. 
distant points go first } //if (neg_count == 0) // then internal point ==>> idx = std::numeric_limits<int>::max() std::uint64_t res = idx; res <<= 32; return res + *reinterpret_cast<unsigned int*>(&dist); } }; __global__ void initalClassifyKernel(const InitalClassify ic, const PointType* points, int cloud_size, std::uint64_t* output) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < cloud_size) output[index] = ic(points[index]); } } } void pcl::device::PointStream::initalClassify() { //thrust::device_ptr<const PointType> beg(cloud.ptr()); //thrust::device_ptr<const PointType> end = beg + cloud_size; thrust::device_ptr<std::uint64_t> out(facets_dists.ptr()); InitalClassify ic(simplex.p1, simplex.p2, simplex.p3, simplex.p4, cloud_diag); //thrust::transform(beg, end, out, ic); //printFuncAttrib(initalClassifyKernel); initalClassifyKernel<<<divUp(cloud_size, 256), 256>>>(ic, cloud, cloud_size, facets_dists); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); thrust::device_ptr<int> pbeg(perm.ptr()); thrust::sort_by_key(out, out + cloud_size, pbeg); } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { __device__ int new_cloud_size; struct SearchFacetHeads { std::uint64_t *facets_dists; int cloud_size; int facet_count; int *perm; const PointType* points; mutable int* head_points; //bool logger; __device__ __forceinline__ void operator()(int facet) const { const std::uint64_t* b = facets_dists; const std::uint64_t* e = b + cloud_size; bool last_thread = facet == facet_count; int search_value = !last_thread ? facet : std::numeric_limits<int>::max(); int index = lower_bound(b, e, search_value, LessThanByFacet()) - b; if (last_thread) new_cloud_size = index; else { bool not_found = index == cloud_size || (facet != (facets_dists[index] >> 32)); head_points[facet] = not_found ? -1 : perm[index]; } } }; __global__ void searchFacetHeadsKernel(const SearchFacetHeads sfh) { int facet = threadIdx.x + blockDim.x * blockIdx.x; if (facet <= sfh.facet_count) sfh(facet); } } } int pcl::device::PointStream::searchFacetHeads(std::size_t facet_count, DeviceArray<int>& head_points) { SearchFacetHeads sfh; sfh.facets_dists = facets_dists; sfh.cloud_size = (int)cloud_size; sfh.facet_count = (int)facet_count; sfh.perm = perm; sfh.points = cloud.ptr(); sfh.head_points = head_points; //thrust::counting_iterator<int> b(0); //thrust::counting_iterator<int> e = b + facet_count + 1; //thrust::for_each(b, e, sfh); searchFacetHeadsKernel<<<divUp(facet_count+1, 256), 256>>>(sfh); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); int new_size; cudaSafeCall( cudaMemcpyFromSymbol( (void*)&new_size, pcl::device::new_cloud_size, sizeof(new_size)) ); return new_size; } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { struct NotMinus1 { __device__ __forceinline__ int operator()(const int& v) const { return (v == -1) ? 
0 : 1; } }; struct Compaction { enum { CTA_SIZE = 256, WARPS = CTA_SIZE/ Warp::WARP_SIZE }; int* head_points_in; PtrStep<int> verts_inds_in; int *scan_buffer; int facet_count; mutable int* head_points_out; mutable PtrStep<int> verts_inds_out; mutable PtrStep<int> empty_facets; mutable int *empty_count; __device__ __forceinline__ void operator()() const { int idx = threadIdx.x + blockIdx.x * blockDim.x; #if CUDART_VERSION >= 9000 if (__all_sync (__activemask (), idx >= facet_count)) return; #else if (__all (idx >= facet_count)) return; #endif int empty = 0; if(idx < facet_count) { int head_idx = head_points_in[idx]; if (head_idx != -1) { int offset = scan_buffer[idx]; head_points_out[offset] = head_idx; verts_inds_out.ptr(0)[offset] = verts_inds_in.ptr(0)[idx]; verts_inds_out.ptr(1)[offset] = verts_inds_in.ptr(1)[idx]; verts_inds_out.ptr(2)[offset] = verts_inds_in.ptr(2)[idx]; } else empty = 1; } #if CUDART_VERSION >= 9000 int total = __popc (__ballot_sync (__activemask (), empty)); #else int total = __popc (__ballot (empty)); #endif if (total > 0) { #if CUDART_VERSION >= 9000 int offset = Warp::binaryExclScan (__ballot_sync (__activemask (), empty)); #else int offset = Warp::binaryExclScan (__ballot (empty)); #endif volatile __shared__ int wapr_buffer[WARPS]; int laneid = Warp::laneId(); int warpid = Warp::id(); if (laneid == 0) { int old = atomicAdd(empty_count, total); wapr_buffer[warpid] = old; } int old = wapr_buffer[warpid]; if (empty) { empty_facets.ptr(0)[old + offset] = verts_inds_in.ptr(0)[idx]; empty_facets.ptr(1)[old + offset] = verts_inds_in.ptr(1)[idx]; empty_facets.ptr(2)[old + offset] = verts_inds_in.ptr(2)[idx]; int a1 = verts_inds_in.ptr(0)[idx], a2 = verts_inds_in.ptr(1)[idx], a3 = verts_inds_in.ptr(2)[idx]; } } } }; __global__ void compactionKernel( const Compaction c ) { c(); } } } void pcl::device::FacetStream::compactFacets() { int old_empty_count; empty_count.download(&old_empty_count); thrust::device_ptr<int> b(head_points.ptr()); thrust::device_ptr<int> e = b + facet_count; thrust::device_ptr<int> o(scan_buffer.ptr()); thrust::transform_exclusive_scan(b, e, o, NotMinus1(), 0, thrust::plus<int>()); Compaction c; c.verts_inds_in = verts_inds; c.head_points_in = head_points; c.scan_buffer = scan_buffer; c.facet_count = facet_count; c.head_points_out = head_points2; c.verts_inds_out = verts_inds2; c.empty_facets = empty_facets; c.empty_count = empty_count; int block = Compaction::CTA_SIZE; int grid = divUp(facet_count, block); compactionKernel<<<grid, block>>>(c); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); verts_inds.swap(verts_inds2); head_points.swap(head_points2); int new_empty_count; empty_count.download(&new_empty_count); facet_count -= new_empty_count - old_empty_count; } /////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////// namespace pcl { namespace device { struct Classify { std::uint64_t* facets_dists; int* scan_buffer; int* head_points; int* perm; PtrStep<int> verts_inds; const PointType *points; float diag; int facet_count; __device__ __forceinline__ void operator()(int point_idx) const { int perm_index = perm[point_idx]; int facet = facets_dists[point_idx] >> 32; facet = scan_buffer[facet]; int hi = head_points[facet]; if (hi == perm_index) { std::uint64_t res = std::numeric_limits<int>::max(); res <<= 32; facets_dists[point_idx] = res; } else { int i1 = verts_inds.ptr(0)[facet]; int i2 = 
verts_inds.ptr(1)[facet]; int i3 = verts_inds.ptr(2)[facet]; float3 hp = tr( points[ hi ] ); float3 v1 = tr( points[ i1 ] ); float3 v2 = tr( points[ i2 ] ); float3 v3 = tr( points[ i3 ] ); float4 p0 = compute_plane(hp, v1, v2, /*opposite*/v3); // j float4 p1 = compute_plane(hp, v2, v3, /*opposite*/v1); // facet_count + j float4 p2 = compute_plane(hp, v3, v1, /*opposite*/v2); // facet_count + j*2 p0 *= compue_inv_normal_norm(p0); p1 *= compue_inv_normal_norm(p1); p2 *= compue_inv_normal_norm(p2); float4 p = points[perm_index]; p.w = 1; float d0 = dot(p, p0); float d1 = dot(p, p1); float d2 = dot(p, p2); float dists[] = { d0, d1, d2 }; int negs_inds[3]; int neg_count = 0; int new_idx = std::numeric_limits<int>::max(); float dist = 0; int indeces[] = { facet, facet + facet_count, facet + facet_count * 2 }; #pragma unroll for(int i = 0; i < 3; ++i) if (dists[i] < 0) negs_inds[neg_count++] = i; if (neg_count == 3) { int i1 = negs_inds[1]; int i2 = negs_inds[2]; int ir = std::abs(dists[i1]) < std::abs(dists[i2]) ? i2 : i1; negs_inds[1] = ir; --neg_count; } if (neg_count == 2) { int i1 = negs_inds[0]; int i2 = negs_inds[1]; int ir = std::abs(dists[i1]) < std::abs(dists[i2]) ? i2 : i1; negs_inds[0] = ir; --neg_count; } if (neg_count == 1) { new_idx = negs_inds[0]; dist = diag - std::abs(dists[new_idx]); // to ensure that sorting order is inverse, i.e. distant points go first new_idx = indeces[new_idx]; } // if (neg_count == 0) // new_idx = std::numeric_limits<int>::max() ==>> internal point std::uint64_t res = new_idx; res <<= 32; res += *reinterpret_cast<unsigned int*>(&dist); facets_dists[point_idx] = res; } /* if (hi == perm_index) */ } }; __global__ void classifyKernel(const Classify c, int cloud_size) { int point_idx = threadIdx.x + blockIdx.x * blockDim.x; if ( point_idx < cloud_size ) c(point_idx); } } } void pcl::device::PointStream::classify(FacetStream& fs) { Classify c; c.facets_dists = facets_dists; c.scan_buffer = fs.scan_buffer; c.head_points = fs.head_points; c.perm = perm; c.verts_inds = fs.verts_inds; c.points = cloud; c.diag = cloud_diag; c.facet_count = fs.facet_count; //thrust::counting_iterator<int> b(0); //thrust::for_each(b, b + cloud_size, c); classifyKernel<<<divUp(cloud_size, 256), 256>>>(c, cloud_size); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); thrust::device_ptr<std::uint64_t> beg(facets_dists.ptr()); thrust::device_ptr<std::uint64_t> end = beg + cloud_size; thrust::device_ptr<int> pbeg(perm.ptr()); thrust::sort_by_key(beg, end, pbeg); } namespace pcl { namespace device { struct SplitFacets { int* head_points; int facet_count; mutable PtrStep<int> verts_inds; __device__ __forceinline__ void operator()(int facet) const { int hi = head_points[facet]; int i1 = verts_inds.ptr(0)[facet]; int i2 = verts_inds.ptr(1)[facet]; int i3 = verts_inds.ptr(2)[facet]; make_facet(hi, i1, i2, facet); make_facet(hi, i2, i3, facet + facet_count); make_facet(hi, i3, i1, facet + facet_count * 2); } __device__ __forceinline__ void make_facet(int i1, int i2, int i3, int out_idx) const { verts_inds.ptr(0)[out_idx] = i1; verts_inds.ptr(1)[out_idx] = i2; verts_inds.ptr(2)[out_idx] = i3; } }; __global__ void splitFacetsKernel(const SplitFacets sf) { int facet = threadIdx.x + blockIdx.x * blockDim.x; if (facet < sf.facet_count) sf(facet); } } } void pcl::device::FacetStream::splitFacets() { SplitFacets sf; sf.head_points = head_points; sf.verts_inds = verts_inds; sf.facet_count = facet_count; //thrust::counting_iterator<int> b(0); //thrust::for_each(b, b + 
facet_count, sf); splitFacetsKernel<<<divUp(facet_count, 256), 256>>>(sf); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); facet_count *= 3; } size_t pcl::device::remove_duplicates(DeviceArray<int>& indeces) { thrust::device_ptr<int> beg(indeces.ptr()); thrust::device_ptr<int> end = beg + indeces.size(); thrust::sort(beg, end); return (std::size_t)(thrust::unique(beg, end) - beg); } namespace pcl { namespace device { __global__ void gatherKernel(const PtrSz<int> indeces, const PointType* src, PointType* dst) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < indeces.size) dst[idx] = src[indeces.data[idx]]; } } } void pcl::device::pack_hull(const DeviceArray<PointType>& points, const DeviceArray<int>& indeces, DeviceArray<PointType>& output) { output.create(indeces.size()); //device_ptr<const PointType> in(points.ptr()); //thrust::device_ptr<const int> mb(indeces.ptr()); //thrust::device_ptr<const int> me = mb + indeces.size(); //device_ptr<PointType> out(output.ptr()); //thrust::gather(mb, me, in, out); gatherKernel<<<divUp(indeces.size(), 256), 256>>>(indeces, points, output); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); }
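computeInitalSimplex above finds its extreme points with transform_reduce over a zip of the point range and a counting_iterator, carrying (value, index) tuples and letting IndOp keep the winning pair. The following is a hedged, self-contained sketch of the same argmax-with-index pattern on plain floats; the functor and variable names are illustrative only and do not appear in the PCL code.

// Standalone sketch of the (value, index) transform_reduce pattern used by
// transform_reduce_max_index / computeInitalSimplex, on toy float data.
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>
#include <cstdio>
#include <limits>

struct TupleIdentity
{
    __host__ __device__ thrust::tuple<float, int>
    operator()(const thrust::tuple<float, int>& t) const { return t; }
};

struct MaxByValue
{
    __host__ __device__ thrust::tuple<float, int>
    operator()(const thrust::tuple<float, int>& a,
               const thrust::tuple<float, int>& b) const
    {
        return thrust::get<0>(a) >= thrust::get<0>(b) ? a : b;
    }
};

int main()
{
    const int n = 5;
    thrust::device_vector<float> v(n);
    v[0] = 3.f; v[1] = 7.f; v[2] = 1.f; v[3] = 9.f; v[4] = 4.f;

    thrust::counting_iterator<int> cbeg(0);
    // zip (value, running index) so the reduction can report where the max lives
    thrust::tuple<float, int> best = thrust::transform_reduce(
        thrust::make_zip_iterator(thrust::make_tuple(v.begin(), cbeg)),
        thrust::make_zip_iterator(thrust::make_tuple(v.end(),   cbeg + n)),
        TupleIdentity(),
        thrust::tuple<float, int>(-std::numeric_limits<float>::max(), -1),
        MaxByValue());

    std::printf("max = %.1f at index %d\n",
                thrust::get<0>(best), thrust::get<1>(best));   // max = 9.0 at index 3
    return 0;
}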
bc7260517229196d6de45a6591d7a091ec23e052.hip
// !!! This is a file automatically generated by hipify!!! /* * Open source copyright declaration based on BSD open source template: * http://www.opensource.org/licenses/bsd-license.php * * This file is part of the scalar-tridiagonal solver distribution. * * Copyright (c) 2015, Endre Lszl and others. Please see the AUTHORS file in * the main source directory for a full list of copyright holders. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * The name of Endre Lszl may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Endre Lszl ''AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Endre Lszl BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <float.h> #include <sys/time.h> #include "trid_mpi_cuda.hpp" #include "trid_mpi_solver_params.hpp" #include "trid_common.h" #include "cutil_inline.h" #include "omp.h" //#include "offload.h" #include "mpi.h" #ifdef __MKL__ //#include "lapacke.h" #include "mkl_lapacke.h" //#include "mkl.h" #endif #include "adi_mpi.h" #include "preproc_mpi_cuda.hpp" #define ROUND_DOWN(N,step) (((N)/(step))*step) #define MIN(X,Y) ((X) < (Y) ? (X) : (Y)) #define MAX(X,Y) ((X) > (Y) ? (X) : (Y)) extern char *optarg; extern int optind, opterr, optopt; static struct option options[] = { {"devid", required_argument, 0, 0 }, {"nx", required_argument, 0, 0 }, {"ny", required_argument, 0, 0 }, {"nz", required_argument, 0, 0 }, {"iter", required_argument, 0, 0 }, {"opt", required_argument, 0, 0 }, {"prof", required_argument, 0, 0 }, {"help", no_argument, 0, 'h' }, {0, 0, 0, 0 } }; // Function for calculating local problem size for a MPI process, as well as its // global start and end indices. 
void setStartEnd(int *start, int *end, int coord, int numProcs, int numElements) { int tmp = numElements / numProcs; int remainder = numElements % numProcs; int total = 0; for(int i = 0; i < coord; i++) { if(i < remainder) { total += tmp + 1; } else { total += tmp; } } *start = total; if(coord < remainder) { *end = *start + tmp; } else { *end = *start + tmp - 1; } } /* * Print essential infromation on the use of the program */ void print_help() { printf("Please specify the ADI configuration, e.g.: \n$ ./adi_* -nx NX -ny NY -nz NZ -iter ITER [-opt CUDAOPT] -prof PROF\n"); exit(0); } // Timing functions inline double elapsed_time(double *et) { struct timeval t; double old_time = *et; gettimeofday( &t, (struct timezone *)0 ); *et = t.tv_sec + t.tv_usec*1.0e-6; return *et - old_time; } inline void timing_start(double *timer) { elapsed_time(timer); } inline void timing_end(double *timer, double *elapsed_accumulate) { double elapsed = elapsed_time(timer); *elapsed_accumulate += elapsed; } // Function to add up a distributed array and print the result void rms(char* name, FP* array, app_handle &handle) { //Sum the square of values in app.h_u double sum = 0.0; for(int k = 0; k < handle.size[2]; k++) { for(int j = 0; j < handle.size[1]; j++) { for(int i = 0; i < handle.size[0]; i++) { int ind = k * handle.size[0] * handle.size[1] + j * handle.size[0] + i; sum += array[ind]; } } } double global_sum = 0.0; MPI_Allreduce(&sum, &global_sum,1, MPI_DOUBLE,MPI_SUM, handle.comm); if(handle.coords[0] == 0 && handle.coords[1] == 0 && handle.coords[2] == 0) { printf("%s sum = %.15g\n", name, global_sum); } } // Initialize the ADI application int init(app_handle &app, preproc_handle<FP> &pre_handle, int &iter, int argc, char* argv[]) { if( MPI_Init(&argc,&argv) != MPI_SUCCESS) { printf("MPI Couldn't initialize. 
Exiting"); exit(-1);} int devid = 0; int nx_g = 256; int ny_g = 256; int nz_g = 256; iter = 10; int opt = 0; int prof = 1; pre_handle.lambda = 1.0f; // Process arguments int opt_index = 0; while( getopt_long_only(argc, argv, "", options, &opt_index) != -1) { if(strcmp((char*)options[opt_index].name,"devid") == 0) devid = atoi(optarg); if(strcmp((char*)options[opt_index].name,"nx" ) == 0) nx_g = atoi(optarg); if(strcmp((char*)options[opt_index].name,"ny" ) == 0) ny_g = atoi(optarg); if(strcmp((char*)options[opt_index].name,"nz" ) == 0) nz_g = atoi(optarg); if(strcmp((char*)options[opt_index].name,"iter") == 0) iter = atoi(optarg); if(strcmp((char*)options[opt_index].name,"opt" ) == 0) opt = atoi(optarg); if(strcmp((char*)options[opt_index].name,"prof") == 0) prof = atoi(optarg); if(strcmp((char*)options[opt_index].name,"help") == 0) print_help(); } // Allocate memory to store problem characteristics app.size_g = (int *) calloc(3, sizeof(int)); app.size = (int *) calloc(3, sizeof(int)); app.start_g = (int *) calloc(3, sizeof(int)); app.end_g = (int *) calloc(3, sizeof(int)); app.size_g[0] = nx_g; app.size_g[1] = ny_g; app.size_g[2] = nz_g; // Set up MPI for tridiagonal solver int procs, rank; MPI_Comm_size(MPI_COMM_WORLD, &procs); MPI_Comm_rank(MPI_COMM_WORLD, &rank); // Create 3D Cartesian MPI topology app.pdims = (int *) calloc(3, sizeof(int)); int *periodic = (int *) calloc(3, sizeof(int)); //false app.coords = (int *) calloc(3, sizeof(int)); MPI_Dims_create(procs, 3, app.pdims); // Setup up which GPU this MPI process is using // Currently set for 4 GPUs per node, with 1 MPI process per GPU //devid = rank % 4; cudaSafeCall( hipSetDevice(devid) ); cutilDeviceInit(argc, argv); hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); // Create 3D Cartesian MPI communicator MPI_Cart_create(MPI_COMM_WORLD, 3, app.pdims, periodic, 0, &app.comm); int my_cart_rank; MPI_Comm_rank(app.comm, &my_cart_rank); MPI_Cart_coords(app.comm, my_cart_rank, 3, app.coords); // Create MPI handle used by tridiagonal solver app.params = new MpiSolverParams(app.comm, 3, app.pdims, 32, MpiSolverParams::ALLGATHER); // Calculate local problem size for this MPI process for(int i = 0; i < 3; i++) { setStartEnd(&app.start_g[i], &app.end_g[i], app.coords[i], app.pdims[i], app.size_g[i]); app.size[i] = app.end_g[i] - app.start_g[i] + 1; } free(periodic); if(rank==0) { printf("\nGlobal grid dimensions: %d x %d x %d\n", app.size_g[0], app.size_g[1], app.size_g[2]); printf("\nNumber of MPI procs in each dimenstion %d, %d, %d\n", app.pdims[0], app.pdims[1], app.pdims[2]); } // Allocate memory for local section of problem int size = app.size[0] * app.size[1] * app.size[2]; cudaSafeCall( hipMalloc((void **)&app.a, size * sizeof(FP)) ); cudaSafeCall( hipMalloc((void **)&app.b, size * sizeof(FP)) ); cudaSafeCall( hipMalloc((void **)&app.c, size * sizeof(FP)) ); cudaSafeCall( hipMalloc((void **)&app.d, size * sizeof(FP)) ); cudaSafeCall( hipMalloc((void **)&app.u, size * sizeof(FP)) ); FP *h_u = (FP *) malloc(sizeof(FP) * size); // Initialize for(int k = 0; k < app.size[2]; k++) { for(int j = 0; j < app.size[1]; j++) { for(int i = 0; i < app.size[0]; i++) { int ind = k * app.size[0] * app.size[1] + j*app.size[0] + i; if( (app.start_g[0]==0 && i==0) || (app.end_g[0]==app.size_g[0]-1 && i==app.size[0]-1) || (app.start_g[1]==0 && j==0) || (app.end_g[1]==app.size_g[1]-1 && j==app.size[1]-1) || (app.start_g[2]==0 && k==0) || (app.end_g[2]==app.size_g[2]-1 && k==app.size[2]-1)) { h_u[ind] = 1.0f; } else { h_u[ind] = 0.0f; } } } } // 
Copy initial values to GPU memory cudaSafeCall( hipMemcpy(app.u, h_u, sizeof(FP) * size, hipMemcpyHostToDevice) ); free(h_u); // Allocate memory used in each iteration's preprocessing pre_handle.rcv_size_x = 2 * app.size[1] * app.size[2]; pre_handle.rcv_size_y = 2 * app.size[0] * app.size[2]; pre_handle.rcv_size_z = 2 * app.size[1] * app.size[0]; pre_handle.halo_snd_x = (FP*) malloc(pre_handle.rcv_size_x * sizeof(FP)); pre_handle.halo_rcv_x = (FP*) malloc(pre_handle.rcv_size_x * sizeof(FP)); pre_handle.halo_snd_y = (FP*) malloc(pre_handle.rcv_size_y * sizeof(FP)); pre_handle.halo_rcv_y = (FP*) malloc(pre_handle.rcv_size_y * sizeof(FP)); pre_handle.halo_snd_z = (FP*) malloc(pre_handle.rcv_size_z * sizeof(FP)); pre_handle.halo_rcv_z = (FP*) malloc(pre_handle.rcv_size_z * sizeof(FP)); cudaSafeCall( hipMalloc((void **)&pre_handle.rcv_x, pre_handle.rcv_size_x * sizeof(FP)) ); cudaSafeCall( hipMalloc((void **)&pre_handle.rcv_y, pre_handle.rcv_size_y * sizeof(FP)) ); cudaSafeCall( hipMalloc((void **)&pre_handle.rcv_z, pre_handle.rcv_size_z * sizeof(FP)) ); return 0; } // Free memory used void finalize(app_handle &app, preproc_handle<FP> &pre_handle) { free(pre_handle.halo_snd_x); free(pre_handle.halo_rcv_x); free(pre_handle.halo_snd_y); free(pre_handle.halo_rcv_y); free(pre_handle.halo_snd_z); free(pre_handle.halo_rcv_z); cudaSafeCall( hipFree(pre_handle.rcv_x) ); cudaSafeCall( hipFree(pre_handle.rcv_y) ); cudaSafeCall( hipFree(pre_handle.rcv_z) ); cudaSafeCall( hipFree(app.a) ); cudaSafeCall( hipFree(app.b) ); cudaSafeCall( hipFree(app.c) ); cudaSafeCall( hipFree(app.d) ); cudaSafeCall( hipFree(app.u) ); free(app.size_g); free(app.size); free(app.start_g); free(app.end_g); free(app.pdims); free(app.coords); delete app.params; } int main(int argc, char* argv[]) { app_handle app; preproc_handle<FP> pre_handle; int iter; // Initialize init(app, pre_handle, iter, argc, argv); // Declare and reset elapsed time counters double timer = 0.0; double timer1 = 0.0; double elapsed_total = 0.0; double elapsed_preproc = 0.0; double elapsed_trid_x = 0.0; double elapsed_trid_y = 0.0; double elapsed_trid_z = 0.0; timing_start(&timer1); // Allocate memory used in sums of distributed arrays FP *h_u = (FP *) malloc(sizeof(FP) * app.size[0] * app.size[1] * app.size[2]); FP *du = (FP *) malloc(sizeof(FP) * app.size[0] * app.size[1] * app.size[2]); // Iterate over specified number of time steps for(int it = 0; it < iter; it++) { // Preprocess timing_start(&timer); preproc_mpi_cuda<FP>(pre_handle, app); timing_end(&timer, &elapsed_preproc); cudaSafeCall( hipDeviceSynchronize() ); // // perform tri-diagonal solves in x-direction // timing_start(&timer); #if FPPREC == 0 tridSmtsvStridedBatchMPI(*(app.params), app.a, app.b, app.c, app.d, app.u, 3, 0, app.size, app.size); #else tridDmtsvStridedBatchMPI(*(app.params), app.a, app.b, app.c, app.d, app.u, 3, 0, app.size, app.size); #endif timing_end(&timer, &elapsed_trid_x); // // perform tri-diagonal solves in y-direction // timing_start(&timer); #if FPPREC == 0 tridSmtsvStridedBatchMPI(*(app.params), app.a, app.b, app.c, app.d, app.u, 3, 1, app.size, app.size); #else tridDmtsvStridedBatchMPI(*(app.params), app.a, app.b, app.c, app.d, app.u, 3, 1, app.size, app.size); #endif timing_end(&timer, &elapsed_trid_y); // // perform tri-diagonal solves in z-direction // timing_start(&timer); #if FPPREC == 0 tridSmtsvStridedBatchIncMPI(*(app.params), app.a, app.b, app.c, app.d, app.u, 3, 2, app.size, app.size); #else tridDmtsvStridedBatchIncMPI(*(app.params), app.a, app.b, app.c, 
app.d, app.u, 3, 2, app.size, app.size); #endif timing_end(&timer, &elapsed_trid_z); } timing_end(&timer1, &elapsed_total); // Print sum of these arrays (basic error checking) cudaSafeCall( hipMemcpy(h_u, app.u, sizeof(FP) * app.size[0] * app.size[1] * app.size[2], hipMemcpyDeviceToHost) ); cudaSafeCall( hipMemcpy(du, app.d, sizeof(FP) * app.size[0] * app.size[1] * app.size[2], hipMemcpyDeviceToHost) ); rms("end h", h_u, app); rms("end d", du, app); MPI_Barrier(MPI_COMM_WORLD); free(h_u); free(du); MPI_Barrier(MPI_COMM_WORLD); // Print out timings of each section if(app.coords[0] == 0 && app.coords[1] == 0 && app.coords[2] == 0) { // Print execution times printf("Time per section: \n[total] \t[prepro] \t[trid_x] \t[trid_y] \t[trid_z]\n"); printf("%e \t%e \t%e \t%e \t%e\n", elapsed_total, elapsed_preproc, elapsed_trid_x, elapsed_trid_y, elapsed_trid_z); } MPI_Barrier(MPI_COMM_WORLD); // Free memory finalize(app, pre_handle); MPI_Finalize(); hipDeviceReset(); return 0; }
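setStartEnd above hands each MPI rank a contiguous slice of a global dimension, giving the first numElements % numProcs ranks one extra element. Here is a small host-only check of that partition; the function body is a condensed copy of the one above, while the test harness itself is illustrative.

// Host-only sanity check of the setStartEnd block partition used above:
// every rank gets a contiguous [start, end] range, the ranges tile
// 0 .. numElements-1 exactly, and the first (numElements % numProcs)
// ranks hold one extra element.
#include <cstdio>
#include <cassert>

static void setStartEnd(int *start, int *end, int coord, int numProcs, int numElements)
{
    int tmp = numElements / numProcs;
    int remainder = numElements % numProcs;
    int total = 0;
    for (int i = 0; i < coord; i++)
        total += (i < remainder) ? tmp + 1 : tmp;
    *start = total;
    *end = *start + ((coord < remainder) ? tmp : tmp - 1);
}

int main()
{
    const int numProcs = 3, numElements = 256;
    int expected_start = 0;
    for (int coord = 0; coord < numProcs; ++coord)
    {
        int start, end;
        setStartEnd(&start, &end, coord, numProcs, numElements);
        assert(start == expected_start);            // contiguous coverage
        std::printf("rank %d: [%d, %d] size %d\n", coord, start, end, end - start + 1);
        expected_start = end + 1;
    }
    assert(expected_start == numElements);           // nothing left over
    return 0;
}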
bc7260517229196d6de45a6591d7a091ec23e052.cu
/* * Open source copyright declaration based on BSD open source template: * http://www.opensource.org/licenses/bsd-license.php * * This file is part of the scalar-tridiagonal solver distribution. * * Copyright (c) 2015, Endre László and others. Please see the AUTHORS file in * the main source directory for a full list of copyright holders. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * The name of Endre László may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Endre László ''AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Endre László BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <float.h> #include <sys/time.h> #include "trid_mpi_cuda.hpp" #include "trid_mpi_solver_params.hpp" #include "trid_common.h" #include "cutil_inline.h" #include "omp.h" //#include "offload.h" #include "mpi.h" #ifdef __MKL__ //#include "lapacke.h" #include "mkl_lapacke.h" //#include "mkl.h" #endif #include "adi_mpi.h" #include "preproc_mpi_cuda.hpp" #define ROUND_DOWN(N,step) (((N)/(step))*step) #define MIN(X,Y) ((X) < (Y) ? (X) : (Y)) #define MAX(X,Y) ((X) > (Y) ? (X) : (Y)) extern char *optarg; extern int optind, opterr, optopt; static struct option options[] = { {"devid", required_argument, 0, 0 }, {"nx", required_argument, 0, 0 }, {"ny", required_argument, 0, 0 }, {"nz", required_argument, 0, 0 }, {"iter", required_argument, 0, 0 }, {"opt", required_argument, 0, 0 }, {"prof", required_argument, 0, 0 }, {"help", no_argument, 0, 'h' }, {0, 0, 0, 0 } }; // Function for calculating local problem size for a MPI process, as well as its // global start and end indices. 
void setStartEnd(int *start, int *end, int coord, int numProcs, int numElements) { int tmp = numElements / numProcs; int remainder = numElements % numProcs; int total = 0; for(int i = 0; i < coord; i++) { if(i < remainder) { total += tmp + 1; } else { total += tmp; } } *start = total; if(coord < remainder) { *end = *start + tmp; } else { *end = *start + tmp - 1; } } /* * Print essential infromation on the use of the program */ void print_help() { printf("Please specify the ADI configuration, e.g.: \n$ ./adi_* -nx NX -ny NY -nz NZ -iter ITER [-opt CUDAOPT] -prof PROF\n"); exit(0); } // Timing functions inline double elapsed_time(double *et) { struct timeval t; double old_time = *et; gettimeofday( &t, (struct timezone *)0 ); *et = t.tv_sec + t.tv_usec*1.0e-6; return *et - old_time; } inline void timing_start(double *timer) { elapsed_time(timer); } inline void timing_end(double *timer, double *elapsed_accumulate) { double elapsed = elapsed_time(timer); *elapsed_accumulate += elapsed; } // Function to add up a distributed array and print the result void rms(char* name, FP* array, app_handle &handle) { //Sum the square of values in app.h_u double sum = 0.0; for(int k = 0; k < handle.size[2]; k++) { for(int j = 0; j < handle.size[1]; j++) { for(int i = 0; i < handle.size[0]; i++) { int ind = k * handle.size[0] * handle.size[1] + j * handle.size[0] + i; sum += array[ind]; } } } double global_sum = 0.0; MPI_Allreduce(&sum, &global_sum,1, MPI_DOUBLE,MPI_SUM, handle.comm); if(handle.coords[0] == 0 && handle.coords[1] == 0 && handle.coords[2] == 0) { printf("%s sum = %.15g\n", name, global_sum); } } // Initialize the ADI application int init(app_handle &app, preproc_handle<FP> &pre_handle, int &iter, int argc, char* argv[]) { if( MPI_Init(&argc,&argv) != MPI_SUCCESS) { printf("MPI Couldn't initialize. 
Exiting"); exit(-1);} int devid = 0; int nx_g = 256; int ny_g = 256; int nz_g = 256; iter = 10; int opt = 0; int prof = 1; pre_handle.lambda = 1.0f; // Process arguments int opt_index = 0; while( getopt_long_only(argc, argv, "", options, &opt_index) != -1) { if(strcmp((char*)options[opt_index].name,"devid") == 0) devid = atoi(optarg); if(strcmp((char*)options[opt_index].name,"nx" ) == 0) nx_g = atoi(optarg); if(strcmp((char*)options[opt_index].name,"ny" ) == 0) ny_g = atoi(optarg); if(strcmp((char*)options[opt_index].name,"nz" ) == 0) nz_g = atoi(optarg); if(strcmp((char*)options[opt_index].name,"iter") == 0) iter = atoi(optarg); if(strcmp((char*)options[opt_index].name,"opt" ) == 0) opt = atoi(optarg); if(strcmp((char*)options[opt_index].name,"prof") == 0) prof = atoi(optarg); if(strcmp((char*)options[opt_index].name,"help") == 0) print_help(); } // Allocate memory to store problem characteristics app.size_g = (int *) calloc(3, sizeof(int)); app.size = (int *) calloc(3, sizeof(int)); app.start_g = (int *) calloc(3, sizeof(int)); app.end_g = (int *) calloc(3, sizeof(int)); app.size_g[0] = nx_g; app.size_g[1] = ny_g; app.size_g[2] = nz_g; // Set up MPI for tridiagonal solver int procs, rank; MPI_Comm_size(MPI_COMM_WORLD, &procs); MPI_Comm_rank(MPI_COMM_WORLD, &rank); // Create 3D Cartesian MPI topology app.pdims = (int *) calloc(3, sizeof(int)); int *periodic = (int *) calloc(3, sizeof(int)); //false app.coords = (int *) calloc(3, sizeof(int)); MPI_Dims_create(procs, 3, app.pdims); // Setup up which GPU this MPI process is using // Currently set for 4 GPUs per node, with 1 MPI process per GPU //devid = rank % 4; cudaSafeCall( cudaSetDevice(devid) ); cutilDeviceInit(argc, argv); cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); // Create 3D Cartesian MPI communicator MPI_Cart_create(MPI_COMM_WORLD, 3, app.pdims, periodic, 0, &app.comm); int my_cart_rank; MPI_Comm_rank(app.comm, &my_cart_rank); MPI_Cart_coords(app.comm, my_cart_rank, 3, app.coords); // Create MPI handle used by tridiagonal solver app.params = new MpiSolverParams(app.comm, 3, app.pdims, 32, MpiSolverParams::ALLGATHER); // Calculate local problem size for this MPI process for(int i = 0; i < 3; i++) { setStartEnd(&app.start_g[i], &app.end_g[i], app.coords[i], app.pdims[i], app.size_g[i]); app.size[i] = app.end_g[i] - app.start_g[i] + 1; } free(periodic); if(rank==0) { printf("\nGlobal grid dimensions: %d x %d x %d\n", app.size_g[0], app.size_g[1], app.size_g[2]); printf("\nNumber of MPI procs in each dimenstion %d, %d, %d\n", app.pdims[0], app.pdims[1], app.pdims[2]); } // Allocate memory for local section of problem int size = app.size[0] * app.size[1] * app.size[2]; cudaSafeCall( cudaMalloc((void **)&app.a, size * sizeof(FP)) ); cudaSafeCall( cudaMalloc((void **)&app.b, size * sizeof(FP)) ); cudaSafeCall( cudaMalloc((void **)&app.c, size * sizeof(FP)) ); cudaSafeCall( cudaMalloc((void **)&app.d, size * sizeof(FP)) ); cudaSafeCall( cudaMalloc((void **)&app.u, size * sizeof(FP)) ); FP *h_u = (FP *) malloc(sizeof(FP) * size); // Initialize for(int k = 0; k < app.size[2]; k++) { for(int j = 0; j < app.size[1]; j++) { for(int i = 0; i < app.size[0]; i++) { int ind = k * app.size[0] * app.size[1] + j*app.size[0] + i; if( (app.start_g[0]==0 && i==0) || (app.end_g[0]==app.size_g[0]-1 && i==app.size[0]-1) || (app.start_g[1]==0 && j==0) || (app.end_g[1]==app.size_g[1]-1 && j==app.size[1]-1) || (app.start_g[2]==0 && k==0) || (app.end_g[2]==app.size_g[2]-1 && k==app.size[2]-1)) { h_u[ind] = 1.0f; } else { h_u[ind] = 0.0f; } } 
} } // Copy initial values to GPU memory cudaSafeCall( cudaMemcpy(app.u, h_u, sizeof(FP) * size, cudaMemcpyHostToDevice) ); free(h_u); // Allocate memory used in each iteration's preprocessing pre_handle.rcv_size_x = 2 * app.size[1] * app.size[2]; pre_handle.rcv_size_y = 2 * app.size[0] * app.size[2]; pre_handle.rcv_size_z = 2 * app.size[1] * app.size[0]; pre_handle.halo_snd_x = (FP*) malloc(pre_handle.rcv_size_x * sizeof(FP)); pre_handle.halo_rcv_x = (FP*) malloc(pre_handle.rcv_size_x * sizeof(FP)); pre_handle.halo_snd_y = (FP*) malloc(pre_handle.rcv_size_y * sizeof(FP)); pre_handle.halo_rcv_y = (FP*) malloc(pre_handle.rcv_size_y * sizeof(FP)); pre_handle.halo_snd_z = (FP*) malloc(pre_handle.rcv_size_z * sizeof(FP)); pre_handle.halo_rcv_z = (FP*) malloc(pre_handle.rcv_size_z * sizeof(FP)); cudaSafeCall( cudaMalloc((void **)&pre_handle.rcv_x, pre_handle.rcv_size_x * sizeof(FP)) ); cudaSafeCall( cudaMalloc((void **)&pre_handle.rcv_y, pre_handle.rcv_size_y * sizeof(FP)) ); cudaSafeCall( cudaMalloc((void **)&pre_handle.rcv_z, pre_handle.rcv_size_z * sizeof(FP)) ); return 0; } // Free memory used void finalize(app_handle &app, preproc_handle<FP> &pre_handle) { free(pre_handle.halo_snd_x); free(pre_handle.halo_rcv_x); free(pre_handle.halo_snd_y); free(pre_handle.halo_rcv_y); free(pre_handle.halo_snd_z); free(pre_handle.halo_rcv_z); cudaSafeCall( cudaFree(pre_handle.rcv_x) ); cudaSafeCall( cudaFree(pre_handle.rcv_y) ); cudaSafeCall( cudaFree(pre_handle.rcv_z) ); cudaSafeCall( cudaFree(app.a) ); cudaSafeCall( cudaFree(app.b) ); cudaSafeCall( cudaFree(app.c) ); cudaSafeCall( cudaFree(app.d) ); cudaSafeCall( cudaFree(app.u) ); free(app.size_g); free(app.size); free(app.start_g); free(app.end_g); free(app.pdims); free(app.coords); delete app.params; } int main(int argc, char* argv[]) { app_handle app; preproc_handle<FP> pre_handle; int iter; // Initialize init(app, pre_handle, iter, argc, argv); // Declare and reset elapsed time counters double timer = 0.0; double timer1 = 0.0; double elapsed_total = 0.0; double elapsed_preproc = 0.0; double elapsed_trid_x = 0.0; double elapsed_trid_y = 0.0; double elapsed_trid_z = 0.0; timing_start(&timer1); // Allocate memory used in sums of distributed arrays FP *h_u = (FP *) malloc(sizeof(FP) * app.size[0] * app.size[1] * app.size[2]); FP *du = (FP *) malloc(sizeof(FP) * app.size[0] * app.size[1] * app.size[2]); // Iterate over specified number of time steps for(int it = 0; it < iter; it++) { // Preprocess timing_start(&timer); preproc_mpi_cuda<FP>(pre_handle, app); timing_end(&timer, &elapsed_preproc); cudaSafeCall( cudaDeviceSynchronize() ); // // perform tri-diagonal solves in x-direction // timing_start(&timer); #if FPPREC == 0 tridSmtsvStridedBatchMPI(*(app.params), app.a, app.b, app.c, app.d, app.u, 3, 0, app.size, app.size); #else tridDmtsvStridedBatchMPI(*(app.params), app.a, app.b, app.c, app.d, app.u, 3, 0, app.size, app.size); #endif timing_end(&timer, &elapsed_trid_x); // // perform tri-diagonal solves in y-direction // timing_start(&timer); #if FPPREC == 0 tridSmtsvStridedBatchMPI(*(app.params), app.a, app.b, app.c, app.d, app.u, 3, 1, app.size, app.size); #else tridDmtsvStridedBatchMPI(*(app.params), app.a, app.b, app.c, app.d, app.u, 3, 1, app.size, app.size); #endif timing_end(&timer, &elapsed_trid_y); // // perform tri-diagonal solves in z-direction // timing_start(&timer); #if FPPREC == 0 tridSmtsvStridedBatchIncMPI(*(app.params), app.a, app.b, app.c, app.d, app.u, 3, 2, app.size, app.size); #else tridDmtsvStridedBatchIncMPI(*(app.params), 
app.a, app.b, app.c, app.d, app.u, 3, 2, app.size, app.size); #endif timing_end(&timer, &elapsed_trid_z); } timing_end(&timer1, &elapsed_total); // Print sum of these arrays (basic error checking) cudaSafeCall( cudaMemcpy(h_u, app.u, sizeof(FP) * app.size[0] * app.size[1] * app.size[2], cudaMemcpyDeviceToHost) ); cudaSafeCall( cudaMemcpy(du, app.d, sizeof(FP) * app.size[0] * app.size[1] * app.size[2], cudaMemcpyDeviceToHost) ); rms("end h", h_u, app); rms("end d", du, app); MPI_Barrier(MPI_COMM_WORLD); free(h_u); free(du); MPI_Barrier(MPI_COMM_WORLD); // Print out timings of each section if(app.coords[0] == 0 && app.coords[1] == 0 && app.coords[2] == 0) { // Print execution times printf("Time per section: \n[total] \t[prepro] \t[trid_x] \t[trid_y] \t[trid_z]\n"); printf("%e \t%e \t%e \t%e \t%e\n", elapsed_total, elapsed_preproc, elapsed_trid_x, elapsed_trid_y, elapsed_trid_z); } MPI_Barrier(MPI_COMM_WORLD); // Free memory finalize(app, pre_handle); MPI_Finalize(); cudaDeviceReset(); return 0; }
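rms() above reduces each rank's local block on the host and then combines the partial sums with MPI_Allreduce, so every rank holds the global total before the (0,0,0) rank prints it. The sketch below shows just that collective pattern; the per-rank value is a stand-in for the local array sum and is not taken from the solver.

// Minimal sketch of the distributed-sum pattern used by rms() above.
#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank, procs;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &procs);

    // stand-in for the local sum over this rank's sub-array
    double local_sum = 1.0 + rank;

    double global_sum = 0.0;
    MPI_Allreduce(&local_sum, &global_sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

    if (rank == 0)
        std::printf("global sum = %.15g (over %d ranks)\n", global_sum, procs);

    MPI_Finalize();
    return 0;
}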
b0cc01bdf5417c44ec47218071e1210af5a33ab5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion

//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.

//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.

//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.

//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.

//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:

//I = .299f * R + .587f * G + .114f * B

//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.

//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.

#include "utils.h"
#include <iostream>

/**
 * Fill in the kernel to convert from color to greyscale
 * the mapping from components of a uchar4 to RGBA is:
 * .x -> R ; .y -> G ; .z -> B ; .w -> A
 *
 * The output (greyImage) at each pixel should be the result of
 * applying the formula: output = .299f * R + .587f * G + .114f * B;
 * Note: We will be ignoring the alpha channel for this conversion
 *
 * First create a mapping from the 2D block and grid locations
 * to an absolute 2D location in the image, then use that to
 * calculate a 1D offset
 */
__global__
void rgba_to_greyscale( const uchar4 * const rgbaImage,
                        unsigned char * const greyImage)
{
    const int GRID_Y = (blockDim.y * blockIdx.y) + threadIdx.y;
    const int GRID_X = (blockDim.x * blockIdx.x) + threadIdx.x;
    const int OFFSET_X = GRID_Y * gridDim.x * blockDim.x;
    const int i = OFFSET_X + GRID_X;

    greyImage[i] = .299f * rgbaImage[i].x + .587f * rgbaImage[i].y + .114f * rgbaImage[i].z;

    // I cannot.  std::cout << "Kernel: pixel " << i << " = " << greyImage[i] << std::endl;
}

/**
 * https://stackoverflow.com/questions/16619274/cuda-griddim-and-blockdim
 *
 * blockDim.x,y,z number of threads in a block in the particular direction
 *
 * gridDim.x,y,z number of blocks in a grid in the particular direction
 *
 * blockDim.x * gridDim.x number of threads in a grid in the x direction
 *
 */
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char * const d_greyImage, size_t nRows, size_t nCols)
{
    const int N_BLOCKS = 1024;
    const int N_BLOCKS_PER_DIM = sqrt(N_BLOCKS);

    // number of threads per block (up to 512/1024 based on GPU model)
    const dim3 blockSize(nCols / N_BLOCKS_PER_DIM, nRows / N_BLOCKS_PER_DIM, 1);
    // number of blocks
    const dim3 gridSize(N_BLOCKS_PER_DIM, N_BLOCKS_PER_DIM, 1);

    // print information
    const size_t nPixels = nRows * nCols;
    const size_t nThreadsPerBlocks = (blockSize.x * blockSize.y);
    const size_t nBlocks = (gridSize.x * gridSize.y);
    const size_t nThreads = nBlocks * nThreadsPerBlocks;

    std::cout << "\nBlocks per dimension: " << N_BLOCKS_PER_DIM
              << "\nTotal Threads: " << nThreads << "\n"
              << "\nType \tC(x)\tR(x)\tTot\tUnit"
              << "\n----------\t----\t----\t---\t----"
              << "\nElements \t" << nCols << "\t" << nRows << "\t" << nPixels << "\tPixels"
              << "\nGrid Size \t" << gridSize.x << "\t" << gridSize.y << "\t" << nBlocks << "\tBlocks"
              << "\nBlock Size\t" << blockSize.x << "\t" << blockSize.y << "\t" << nThreadsPerBlocks << "\tThreads/Block"
              << "\n" << std::endl;

    hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage);

    hipDeviceSynchronize();
    checkCudaErrors(hipGetLastError());
}
b0cc01bdf5417c44ec47218071e1210af5a33ab5.cu
// Homework 1
// Color to Greyscale Conversion

//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.

//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.

//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.

//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.

//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:

//I = .299f * R + .587f * G + .114f * B

//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.

//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.

#include "utils.h"
#include <iostream>

/**
 * Fill in the kernel to convert from color to greyscale
 * the mapping from components of a uchar4 to RGBA is:
 * .x -> R ; .y -> G ; .z -> B ; .w -> A
 *
 * The output (greyImage) at each pixel should be the result of
 * applying the formula: output = .299f * R + .587f * G + .114f * B;
 * Note: We will be ignoring the alpha channel for this conversion
 *
 * First create a mapping from the 2D block and grid locations
 * to an absolute 2D location in the image, then use that to
 * calculate a 1D offset
 */
__global__
void rgba_to_greyscale( const uchar4 * const rgbaImage,
                        unsigned char * const greyImage)
{
    const int GRID_Y = (blockDim.y * blockIdx.y) + threadIdx.y;
    const int GRID_X = (blockDim.x * blockIdx.x) + threadIdx.x;
    const int OFFSET_X = GRID_Y * gridDim.x * blockDim.x;
    const int i = OFFSET_X + GRID_X;

    greyImage[i] = .299f * rgbaImage[i].x + .587f * rgbaImage[i].y + .114f * rgbaImage[i].z;

    // I cannot.  std::cout << "Kernel: pixel " << i << " = " << greyImage[i] << std::endl;
}

/**
 * https://stackoverflow.com/questions/16619274/cuda-griddim-and-blockdim
 *
 * blockDim.x,y,z number of threads in a block in the particular direction
 *
 * gridDim.x,y,z number of blocks in a grid in the particular direction
 *
 * blockDim.x * gridDim.x number of threads in a grid in the x direction
 *
 */
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char * const d_greyImage, size_t nRows, size_t nCols)
{
    const int N_BLOCKS = 1024;
    const int N_BLOCKS_PER_DIM = sqrt(N_BLOCKS);

    // number of threads per block (up to 512/1024 based on GPU model)
    const dim3 blockSize(nCols / N_BLOCKS_PER_DIM, nRows / N_BLOCKS_PER_DIM, 1);
    // number of blocks
    const dim3 gridSize(N_BLOCKS_PER_DIM, N_BLOCKS_PER_DIM, 1);

    // print information
    const size_t nPixels = nRows * nCols;
    const size_t nThreadsPerBlocks = (blockSize.x * blockSize.y);
    const size_t nBlocks = (gridSize.x * gridSize.y);
    const size_t nThreads = nBlocks * nThreadsPerBlocks;

    std::cout << "\nBlocks per dimension: " << N_BLOCKS_PER_DIM
              << "\nTotal Threads: " << nThreads << "\n"
              << "\nType \tC(x)\tR(x)\tTot\tUnit"
              << "\n----------\t----\t----\t---\t----"
              << "\nElements \t" << nCols << "\t" << nRows << "\t" << nPixels << "\tPixels"
              << "\nGrid Size \t" << gridSize.x << "\t" << gridSize.y << "\t" << nBlocks << "\tBlocks"
              << "\nBlock Size\t" << blockSize.x << "\t" << blockSize.y << "\t" << nThreadsPerBlocks << "\tThreads/Block"
              << "\n" << std::endl;

    rgba_to_greyscale<<<gridSize, blockSize>>> (d_rgbaImage, d_greyImage);

    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
}
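The header comment asks for block and grid sizes that cover the entire image, but the configuration above assumes nRows and nCols divide evenly by the 32-block-per-dimension layout and the kernel has no bounds check. Below is an alternative sketch, not the author's configuration: fixed 16x16 blocks, a ceil-divided grid, and a guard in the kernel, assuming the usual row-major image layout. The kernel and helper names are illustrative.

// Alternative launch sketch (not the homework author's configuration):
// fixed-size blocks, a grid rounded up to cover the image, and an explicit
// bounds check so arbitrary nRows x nCols images are handled.
#include <cuda_runtime.h>
#include <vector>
#include <cstdio>

__global__ void rgba_to_greyscale_guarded(const uchar4* const rgbaImage,
                                          unsigned char* const greyImage,
                                          int nRows, int nCols)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= nRows || col >= nCols)
        return;                                   // threads past the edge do nothing
    const int i = row * nCols + col;              // row-major 1D offset
    const uchar4 p = rgbaImage[i];
    greyImage[i] = (unsigned char)(.299f * p.x + .587f * p.y + .114f * p.z);
}

// Host-side launch: ceil-divide so partially filled edge blocks are included.
static void launch_greyscale(const uchar4* d_rgba, unsigned char* d_grey,
                             int nRows, int nCols)
{
    const dim3 blockSize(16, 16, 1);
    const dim3 gridSize((nCols + blockSize.x - 1) / blockSize.x,
                        (nRows + blockSize.y - 1) / blockSize.y, 1);
    rgba_to_greyscale_guarded<<<gridSize, blockSize>>>(d_rgba, d_grey, nRows, nCols);
    cudaDeviceSynchronize();
}

int main()
{
    const int nRows = 33, nCols = 45;             // deliberately not multiples of 16
    std::vector<uchar4> h_rgba(nRows * nCols, make_uchar4(10, 200, 30, 255));
    std::vector<unsigned char> h_grey(nRows * nCols, 0);

    uchar4* d_rgba = 0;
    unsigned char* d_grey = 0;
    cudaMalloc((void**)&d_rgba, h_rgba.size() * sizeof(uchar4));
    cudaMalloc((void**)&d_grey, h_grey.size());
    cudaMemcpy(d_rgba, h_rgba.data(), h_rgba.size() * sizeof(uchar4), cudaMemcpyHostToDevice);

    launch_greyscale(d_rgba, d_grey, nRows, nCols);

    cudaMemcpy(h_grey.data(), d_grey, h_grey.size(), cudaMemcpyDeviceToHost);
    std::printf("grey[0] = %d\n", (int)h_grey[0]); // .299*10 + .587*200 + .114*30 -> 123
    cudaFree(d_rgba);
    cudaFree(d_grey);
    return 0;
}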
cc3a3a9aac4012b6b73b3b136d5b03787fe6394d.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
//#include "timer.h"
//#include "utils.h"
#include <string>
#include <stdio.h>

size_t numRows();  //return # of rows in the image
size_t numCols();  //return # of cols in the image

void preProcess(uchar4 **h_rgbaImage, uchar4 **h_greyImage,
                uchar4 **d_rgbaImage, uchar4 **d_greyImage,
                const std::string& filename);

void postProcess(const std::string& output_file);

void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            uchar4* d_greyImage, size_t numRows, size_t numCols);

//include the definitions of the above functions for this homework
//#include "rgb2grey.hip"

int main(int argc, char **argv) {
  uchar4 *h_rgbaImage, *d_rgbaImage;
  uchar4 *h_greyImage, *d_greyImage;

  std::string input_file;
  std::string output_file;
  if (argc == 3) {
    input_file  = std::string(argv[1]);
    output_file = std::string(argv[2]);
  }
  else {
    std::cerr << "Usage: ./hw input_file output_file" << std::endl;
    exit(1);
  }

  //load the image and give us our input and output pointers
  preProcess(&h_rgbaImage, &h_greyImage, &d_rgbaImage, &d_greyImage, input_file);

  //GpuTimer timer;
  //timer.Start();
  //call the students' code
  std::cout<<"Input Size: "<<" "<<numCols()<<" x "<<numRows()<<std::endl;
  your_rgba_to_greyscale(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols());
  //timer.Stop();
  hipDeviceSynchronize();
  //checkCudaErrors(hipGetLastError());

  /*
  int err = printf("%f msecs.\n", timer.Elapsed());
  if (err < 0) {
    //Couldn't print! Probably the student closed stdout - bad news
    std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
    exit(1);
  }
  */

  //check results and output the grey image
  postProcess(output_file);

  return 0;
}
cc3a3a9aac4012b6b73b3b136d5b03787fe6394d.cu
#include <iostream>
//#include "timer.h"
//#include "utils.h"
#include <string>
#include <stdio.h>

size_t numRows();  //return # of rows in the image
size_t numCols();  //return # of cols in the image

void preProcess(uchar4 **h_rgbaImage, uchar4 **h_greyImage,
                uchar4 **d_rgbaImage, uchar4 **d_greyImage,
                const std::string& filename);

void postProcess(const std::string& output_file);

void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            uchar4* d_greyImage, size_t numRows, size_t numCols);

//include the definitions of the above functions for this homework
//#include "rgb2grey.cu"

int main(int argc, char **argv) {
  uchar4 *h_rgbaImage, *d_rgbaImage;
  uchar4 *h_greyImage, *d_greyImage;

  std::string input_file;
  std::string output_file;
  if (argc == 3) {
    input_file  = std::string(argv[1]);
    output_file = std::string(argv[2]);
  }
  else {
    std::cerr << "Usage: ./hw input_file output_file" << std::endl;
    exit(1);
  }

  //load the image and give us our input and output pointers
  preProcess(&h_rgbaImage, &h_greyImage, &d_rgbaImage, &d_greyImage, input_file);

  //GpuTimer timer;
  //timer.Start();
  //call the students' code
  std::cout<<"Input Size: "<<" "<<numCols()<<" x "<<numRows()<<std::endl;
  your_rgba_to_greyscale(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols());
  //timer.Stop();
  cudaDeviceSynchronize();
  //checkCudaErrors(cudaGetLastError());

  /*
  int err = printf("%f msecs.\n", timer.Elapsed());
  if (err < 0) {
    //Couldn't print! Probably the student closed stdout - bad news
    std::cerr << "Couldn't print timing information! STDOUT Closed!" << std::endl;
    exit(1);
  }
  */

  //check results and output the grey image
  postProcess(output_file);

  return 0;
}
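main() above leaves the GpuTimer calls commented out; if timing is wanted without the homework's timer.h, CUDA events are a common substitute. A hedged sketch follows; the region being timed is a placeholder for whatever work you launch (for example the your_rgba_to_greyscale call), and nothing here is part of the homework framework.

// Event-based timing sketch that could stand in for the commented-out
// GpuTimer in main(); illustrative only, not part of the homework code.
#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    // ... launch the work to be timed here, e.g.
    // your_rgba_to_greyscale(h_rgbaImage, d_rgbaImage, d_greyImage, numRows(), numCols());
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);      // wait until the recorded work has finished

    float msecs = 0.f;
    cudaEventElapsedTime(&msecs, start, stop);
    std::printf("%f msecs.\n", msecs);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}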
87430ba0549407b41412a5c3a5917e893ecf15a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file k_reorder_cuda_kernel.cu * @author Yibo Lin * @date Jan 2019 */ #include <stdlib.h> #include <stdio.h> #include <time.h> #include <random> #include <assert.h> #include <chrono> #include <cmath> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/swap.h> #include <thrust/reduce.h> #include <thrust/functional.h> //#define DEBUG //#define DYNAMIC //#define TIMER #define DETERMINISTIC #include "utility/src/Msg.h" #include "utility/src/utils.cuh" #include "utility/src/limits.cuh" #include "utility/src/DetailedPlaceDB.cuh" #include "utility/src/FlatNestedVector.cuh" #include "utility/src/PitchNestedVector.cuh" #include "k_reorder/src/quick_perm.h" #include "k_reorder/src/row2node_map.h" #include "k_reorder/src/compute_independent_rows.h" #include "k_reorder/src/compute_reorder_instances.h" #include "hipcub/hipcub.hpp" DREAMPLACE_BEGIN_NAMESPACE // maximum number of cells for reordering #define MAX_K 4 // maximum number of nets per cell to be considered #define MAX_NUM_NETS_PER_NODE 20 // maximum number of nets incident to cells per instance #define MAX_NUM_NETS_PER_INSTANCE ( MAX_NUM_NETS_PER_NODE * MAX_K ) /// Concepts in the algorith: /// A group contains independent rows. /// An KReorderInstance contains an adjacent sequence of cells to be solved. /// /// a net for a reorder instance template <typename T> struct InstanceNet { int net_id; int node_marker; ///< mark cells in one instance using bit T bxl; T bxh; T pin_offset_x[MAX_K]; }; template <typename T> struct KReorderState { PitchNestedVector<int> row2node_map; int* permutations; ///< num_permutations x K int num_permutations; T* node_space_x; ///< cell size with spaces, a cell only considers its right space PitchNestedVector<KReorderInstance> reorder_instances; ///< array of array for independent instances; each instance is a sequence of at most K cells to be solved. 
T* costs; ///< maximum reorder_instances.size2 * num_permutations int* best_permute_id; ///< maximum reorder_instances.size2 InstanceNet<T>* instance_nets; ///< reorder_instances.size2 * MAX_NUM_NETS_PER_INSTANCE int* instance_nets_size; ///< reorder_instances.size2, number of nets for each instance int* node2inst_map; ///< map cell to instance int* net_markers; ///< whether a net is in this group unsigned char* node_markers; ///< cell offset in instance int* device_num_moved; int K; ///< number of cells to reorder double* net_hpwls; ///< used for compute HPWL }; template <typename DetailedPlaceDBType, typename StateType> inline __device__ void compute_position( const DetailedPlaceDBType& db, const StateType& state, const KReorderInstance& inst, int permute_id, typename DetailedPlaceDBType::type target_x[], typename DetailedPlaceDBType::type target_sizes[] ) { auto row2nodes = state.row2node_map(inst.row_id) + inst.idx_bgn; auto permutation = state.permutations + permute_id*state.K; int K = inst.idx_end - inst.idx_bgn; #ifdef DEBUG if (!(inst.idx_end-inst.idx_bgn <= state.row2node_map.size(inst.row_id))) { printf("idx_bgn %d, idx_end %d, row size %u\n", inst.idx_bgn, inst.idx_end, state.row2node_map.size(inst.row_id)); } assert(inst.idx_end-inst.idx_bgn <= state.row2node_map.size(inst.row_id)); assert(K <= MAX_K); #endif // find left boundary if (K) { int node_id = row2nodes[0]; target_x[0] = db.x[node_id]; } // record sizes, and pack to left for (int i = 0; i < K; ++i) { int node_id = row2nodes[i]; assert(node_id < db.num_movable_nodes); target_sizes[permutation[i]] = state.node_space_x[node_id]; } for (int i = 1; i < K; ++i) { target_x[i] = target_x[i-1] + target_sizes[i-1]; } } template <typename DetailedPlaceDBType, typename StateType> inline __device__ typename DetailedPlaceDBType::type compute_instance_hpwl( const DetailedPlaceDBType& db, const StateType& state, const KReorderInstance& inst, int permute_id, const int* __restrict__ row2nodes, int K, typename DetailedPlaceDBType::type target_x[], typename DetailedPlaceDBType::type target_sizes[] ) { typedef typename DetailedPlaceDBType::type T; auto check_node_exist = [&](int bgn, int end, int node_id){ for (int i = bgn; i < end; ++i) { if (row2nodes[i] == node_id) { return i; } } return cuda::numeric_limits<int>::max(); }; T row_yl = db.yl + inst.row_id*db.row_height; auto permutation = state.permutations + permute_id*state.K; T cost = 0; for (int i = 0; i < K; ++i) { int node_id = row2nodes[i]; int node2pin_id = db.flat_node2pin_start_map[node_id]; const int node2pin_id_end = db.flat_node2pin_start_map[node_id+1]; for (; node2pin_id < node2pin_id_end; ++node2pin_id) { int node_pin_id = db.flat_node2pin_map[node2pin_id]; int net_id = db.pin2net_map[node_pin_id]; T bxl = db.xh; T bxh = db.xl; int flag = db.net_mask[net_id]; if (flag) { int net2pin_id = db.flat_net2pin_start_map[net_id]; const int net2pin_id_end = db.flat_net2pin_start_map[net_id+1]*flag; for (; net2pin_id < net2pin_id_end; ++net2pin_id) { int net_pin_id = db.flat_net2pin_map[net2pin_id]; int other_node_id = db.pin2node_map[net_pin_id]; // if other node found in previous // this net has already been computed int other_node_found = check_node_exist(0, K, other_node_id); if (other_node_found < i) { break; } T other_node_xl; if (other_node_found < K) { int permuted_offset = permutation[other_node_found]; other_node_xl = target_x[permuted_offset]; } else // not found { other_node_xl = db.x[other_node_id]; if (db.y[other_node_id] == row_yl) // in the same row { if 
(other_node_xl < target_x[0]) // left of the segment { other_node_xl = db.xl; } else if (other_node_xl > target_x[K-1]) // right of the segment { other_node_xl = db.xh; } } } other_node_xl += db.pin_offset_x[net_pin_id]; bxl = min(bxl, other_node_xl); bxh = max(bxh, other_node_xl); } cost += bxh-bxl; } } } return cost; } template <typename DetailedPlaceDBType, typename StateType> __global__ void compute_instance_net_boxes( DetailedPlaceDBType db, StateType state, int group_id, int offset ) { typedef typename DetailedPlaceDBType::type T; __shared__ int group_size; if (threadIdx.x == 0) { group_size = state.reorder_instances.size(group_id); } __syncthreads(); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < group_size; i += blockDim.x * gridDim.x) { int inst_id = i; // this is a copy auto inst = state.reorder_instances(group_id, inst_id); inst.idx_bgn += offset; inst.idx_end = min(inst.idx_end+offset, state.row2node_map.size(inst.row_id)); auto row2nodes = state.row2node_map(inst.row_id) + inst.idx_bgn; int K = inst.idx_end-inst.idx_bgn; // after adding offset for (int idx = 0; idx < K; ++idx) { int node_id = row2nodes[idx]; if (node_id >= db.num_movable_nodes || db.node_size_y[node_id] > db.row_height) { inst.idx_end = inst.idx_bgn+idx; K = idx; break; } } if (K > 0) { T segment_xl = db.x[row2nodes[0]]; T segment_xh = db.x[row2nodes[K-1]]; T row_yl = db.yl + inst.row_id * db.row_height; auto instance_nets = state.instance_nets + inst_id*MAX_NUM_NETS_PER_INSTANCE; auto instance_nets_size = state.instance_nets_size[inst_id]; for (int idx = 0; idx < instance_nets_size; ++idx) { auto& instance_net = instance_nets[idx]; instance_net.bxl = db.xh; instance_net.bxh = db.xl; int net2pin_id = db.flat_net2pin_start_map[instance_net.net_id]; const int net2pin_id_end = db.flat_net2pin_start_map[instance_net.net_id+1]; for (; net2pin_id < net2pin_id_end; ++net2pin_id) { int net_pin_id = db.flat_net2pin_map[net2pin_id]; int other_node_id = db.pin2node_map[net_pin_id]; if (other_node_id < db.num_nodes) // other_node_id may exceed db.num_nodes like IO pins { int other_node_found = (state.node2inst_map[other_node_id] == inst_id); if (!other_node_found) // not found { T other_node_xl = db.x[other_node_id]; auto pin_offset_x = db.pin_offset_x[net_pin_id]; if (abs(db.y[other_node_id]-row_yl) < db.row_height) // in the same row { if (other_node_xl < segment_xl) // left of the segment { other_node_xl = db.xl; } else if (other_node_xl > segment_xh) // right of the segment { other_node_xl = db.xh; } } other_node_xl += pin_offset_x; instance_net.bxl = min(instance_net.bxl, other_node_xl); instance_net.bxh = max(instance_net.bxh, other_node_xl); } } } } } } } template <typename DetailedPlaceDBType, typename StateType> __global__ void compute_reorder_hpwl( DetailedPlaceDBType db, StateType state, int group_id, int offset ) { typedef typename DetailedPlaceDBType::type T; __shared__ int group_size; __shared__ int group_size_with_permutation; if (threadIdx.x == 0) { group_size = state.reorder_instances.size(group_id); group_size_with_permutation = group_size*state.num_permutations; } __syncthreads(); typename DetailedPlaceDBType::type target_x[MAX_K]; typename DetailedPlaceDBType::type target_sizes[MAX_K]; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < group_size_with_permutation; i += blockDim.x * gridDim.x) { int inst_id = i/state.num_permutations; int permute_id = i - inst_id*state.num_permutations; // this is a copy auto inst = state.reorder_instances(group_id, inst_id); inst.idx_bgn += offset; 
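/* Sliding-window note: idx_bgn has just been shifted by "offset"; the next statement clamps idx_end to the row length so the shifted window never reads past row2node_map for this row. */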
inst.idx_end = min(inst.idx_end+offset, state.row2node_map.size(inst.row_id)); auto row2nodes = state.row2node_map(inst.row_id) + inst.idx_bgn; auto permutation = state.permutations + permute_id*state.K; int K = inst.idx_end-inst.idx_bgn; // after adding offset for (int idx = 0; idx < K; ++idx) { int node_id = row2nodes[idx]; if (node_id >= db.num_movable_nodes || db.node_size_y[node_id] > db.row_height) { inst.idx_end = inst.idx_bgn+idx; K = idx; break; } } int valid_flag = (K > 0); for (int idx = 0; idx < K; ++idx) { if (permutation[idx] >= K) { valid_flag = 0; break; } } if (valid_flag) { compute_position(db, state, inst, permute_id, target_x, target_sizes); //state.costs[i] = compute_instance_hpwl( // db, // state, // inst, // permute_id, // row2nodes, // K, // target_x, // target_sizes // ); T cost = 0; // consider FENCE region if (db.num_regions) { for (int idx = 0; idx < K; ++idx) { int node_id = row2nodes[idx]; int permuted_offset = permutation[idx]; T node_xl = target_x[permuted_offset]; T node_yl = db.y[node_id]; if (!db.inside_fence(node_id, node_xl, node_yl)) { cost = cuda::numeric_limits<T>::max(); break; } } } if (cost == 0) { auto instance_nets = state.instance_nets + inst_id*MAX_NUM_NETS_PER_INSTANCE; auto const& instance_nets_size = state.instance_nets_size[inst_id]; for (int idx = 0; idx < instance_nets_size; ++idx) { auto& instance_net = instance_nets[idx]; T bxl = instance_net.bxl; T bxh = instance_net.bxh; for (int j = 0; j < K; ++j) { int flag = (1<<j); if ((instance_net.node_marker & flag)) { int permuted_offset = permutation[j]; T other_node_xl = target_x[permuted_offset]; other_node_xl += instance_net.pin_offset_x[j]; bxl = min(bxl, other_node_xl); bxh = max(bxh, other_node_xl); } } cost += bxh-bxl; } } state.costs[i] = cost; } } } template <typename T> struct ItemWithIndex { T value; int index; }; template <typename T> struct ReduceMinOP { __host__ __device__ ItemWithIndex<T> operator()(const ItemWithIndex<T>& a, const ItemWithIndex<T>& b) const { return (a.value < b.value)? 
a : b; } }; template <typename T, int ThreadsPerBlock=32> __global__ void reduce_min_2d_cub(const T* __restrict__ costs, int* best_permute_id, int m, int n) { typedef hipcub::BlockReduce<ItemWithIndex<T>, ThreadsPerBlock> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; auto inst_costs = costs + blockIdx.x*n; auto inst_best_permute_id = best_permute_id + blockIdx.x; ItemWithIndex<T> thread_data; thread_data.value = cuda::numeric_limits<T>::max(); thread_data.index = 0; for (int col = threadIdx.x; col < n; col += ThreadsPerBlock) { T cost = inst_costs[col]; if (cost < thread_data.value) { thread_data.value = cost; thread_data.index = col; } } __syncthreads(); // Compute the block-wide max for thread0 ItemWithIndex<T> aggregate = BlockReduce(temp_storage).Reduce(thread_data, ReduceMinOP<T>(), n); __syncthreads(); if (threadIdx.x == 0) { //printf("inst[%d] cost %g, permute_id %d\n", blockIdx.x, aggregate.value, aggregate.index); *inst_best_permute_id = aggregate.index; } } template <typename DetailedPlaceDBType, typename StateType> __global__ void apply_reorder( DetailedPlaceDBType db, StateType state, int group_id, int offset ) { __shared__ int group_size; if (threadIdx.x == 0) { group_size = state.reorder_instances.size(group_id); } __syncthreads(); typename DetailedPlaceDBType::type target_x[MAX_K]; typename DetailedPlaceDBType::type target_sizes[MAX_K]; int target_nodes[MAX_K]; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < group_size; i += blockDim.x * gridDim.x) { int inst_id = i; int permute_id = state.best_permute_id[i]; #ifdef DEBUG //printf("inst[%d].permute_id = %d\n", inst_id, permute_id); assert(permute_id < state.num_permutations); #endif // this is a copy for adding offset auto inst = state.reorder_instances(group_id, inst_id); inst.idx_bgn += offset; inst.idx_end = min(inst.idx_end+offset, state.row2node_map.size(inst.row_id)); auto row2nodes = state.row2node_map(inst.row_id) + inst.idx_bgn; auto permutation = state.permutations + permute_id*state.K; int K = inst.idx_end-inst.idx_bgn; // after adding offset for (int idx = 0; idx < K; ++idx) { int node_id = row2nodes[idx]; if (node_id >= db.num_movable_nodes || db.node_size_y[node_id] > db.row_height) { inst.idx_end = inst.idx_bgn+idx; K = idx; break; } } if (K > 0) { compute_position(db, state, inst, permute_id, target_x, target_sizes); for (int i = 0; i < K; ++i) { int node_id = row2nodes[i]; target_nodes[i] = node_id; } for (int i = 0; i < K; ++i) { int node_id = row2nodes[i]; typename DetailedPlaceDBType::type xx = target_x[permutation[i]]; if (db.x[node_id] != xx) { atomicAdd(state.device_num_moved, 1); } db.x[node_id] = xx; } for (int i = 0; i < K; ++i) { row2nodes[permutation[i]] = target_nodes[i]; } } } } /// @brief Map each node to its instance. /// For each instance in the group /// For each node incident to the instance /// update node2inst_map /// update node_markers /// Every time, we solve one group with all independent instances in the group. /// For sliding window, offset can be different during iterations, /// so node2inst_map and node_markers need to be recomputed. 
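/* One thread below handles one instance; only movable cells (node_id < num_movable_nodes) are recorded into node2inst_map and node_markers, while fixed cells keep the defaults written by reset_state. */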
template <typename T> __global__ void compute_node2inst_map(DetailedPlaceDB<T> db, KReorderState<T> state, int group_id, int offset) { __shared__ int group_size; if (threadIdx.x == 0) { group_size = state.reorder_instances.size(group_id); } __syncthreads(); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < group_size; i += blockDim.x * gridDim.x) { int inst_id = i; // this is a copy auto inst = state.reorder_instances(group_id, inst_id); inst.idx_bgn += offset; inst.idx_end = min(inst.idx_end+offset, state.row2node_map.size(inst.row_id)); auto row2nodes = state.row2node_map(inst.row_id) + inst.idx_bgn; int K = inst.idx_end-inst.idx_bgn; for (int j = 0; j < K; ++j) { int node_id = row2nodes[j]; // do not update for fixed cells if (node_id < db.num_movable_nodes) { state.node2inst_map[node_id] = inst_id; state.node_markers[node_id] = j; } } } } /// @brief Mark target nets for all instances in this group. template <typename T> __global__ void compute_net_markers(DetailedPlaceDB<T> db, KReorderState<T> state) { for (int node_id = blockIdx.x * blockDim.x + threadIdx.x; node_id < db.num_movable_nodes; node_id += blockDim.x * gridDim.x) { if (state.node2inst_map[node_id] < cuda::numeric_limits<int>::max()) { int node2pin_id = db.flat_node2pin_start_map[node_id]; const int node2pin_id_end = db.flat_node2pin_start_map[node_id+1]; for (; node2pin_id < node2pin_id_end; ++node2pin_id) { int node_pin_id = db.flat_node2pin_map[node2pin_id]; int net_id = db.pin2net_map[node_pin_id]; int flag = db.net_mask[net_id]; atomicOr(state.net_markers+net_id, flag); } } } } template <typename T> __global__ void print_net_markers(DetailedPlaceDB<T> db, KReorderState<T> state) { if (blockIdx.x == 0 && threadIdx.x == 0) { for (int i = 0; i < db.num_nets; ++i) { printf("net_markers[%d] = %d\n", i, state.net_markers[i]); } } } #ifdef DETERMINISTIC /// @brief Collect information of nets belong to each instance. /// The net order is deterministic. 
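/* Deterministic variant: one thread walks one instance's cells and their pins in order and appends marked nets sequentially, so no atomics are needed and the per-instance net order is reproducible across runs. */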
template <typename T> __global__ void compute_instance_nets(DetailedPlaceDB<T> db, KReorderState<T> state, int group_id, int offset) { __shared__ int group_size; if (threadIdx.x == 0) { group_size = state.reorder_instances.size(group_id); } __syncthreads(); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < group_size; i += blockDim.x * gridDim.x) { int inst_id = i; // this is a copy auto inst = state.reorder_instances(group_id, inst_id); inst.idx_bgn += offset; inst.idx_end = min(inst.idx_end+offset, state.row2node_map.size(inst.row_id)); const auto row2nodes = state.row2node_map(inst.row_id) + inst.idx_bgn; int K = inst.idx_end-inst.idx_bgn; auto& instance_nets_size = state.instance_nets_size[inst_id]; auto instance_nets = state.instance_nets + inst_id*MAX_NUM_NETS_PER_INSTANCE; instance_nets_size = 0; // after adding offset for (int idx = 0; idx < K; ++idx) { int node_id = row2nodes[idx]; if (node_id >= db.num_movable_nodes || db.node_size_y[node_id] > db.row_height) { inst.idx_end = inst.idx_bgn+idx; K = idx; break; } } for (int j = 0; j < K; ++j) { int node_id = row2nodes[j]; int node2pin_id = db.flat_node2pin_start_map[node_id]; int node2pin_id_end = db.flat_node2pin_start_map[node_id+1]; for (; node2pin_id < node2pin_id_end; ++node2pin_id) { int node_pin_id = db.flat_node2pin_map[node2pin_id]; int net_id = db.pin2net_map[node_pin_id]; if (state.net_markers[net_id]) { if (instance_nets_size < MAX_NUM_NETS_PER_INSTANCE) { auto& instance_net = instance_nets[instance_nets_size]; instance_net.net_id = net_id; instance_net.node_marker = (1<<state.node_markers[node_id]); instance_net.pin_offset_x[state.node_markers[node_id]] = db.pin_offset_x[node_pin_id]; instance_nets_size += 1; } } } } } } #else /// @brief Collect information of nets belong to each instance. /// The net orders may not be deterministic. template <typename T> __global__ void compute_instance_nets(DetailedPlaceDB<T> db, KReorderState<T> state) { for (int net_id = blockIdx.x * blockDim.x + threadIdx.x; net_id < db.num_nets; net_id += blockDim.x * gridDim.x) { if (state.net_markers[net_id]) { int net2pin_id = db.flat_net2pin_start_map[net_id]; const int net2pin_id_end = db.flat_net2pin_start_map[net_id+1]; for (; net2pin_id < net2pin_id_end; ++net2pin_id) { int net_pin_id = db.flat_net2pin_map[net2pin_id]; int other_node_id = db.pin2node_map[net_pin_id]; if (other_node_id < db.num_nodes) // other_node_id may exceed db.num_nodes like IO pins { int inst_id = state.node2inst_map[other_node_id]; if (inst_id < cuda::numeric_limits<int>::max()) { auto instance_nets_size = state.instance_nets_size + inst_id; int index = atomicAdd(instance_nets_size, 1); if (index < MAX_NUM_NETS_PER_INSTANCE) { auto& instance_net = state.instance_nets[inst_id*MAX_NUM_NETS_PER_INSTANCE + index]; instance_net.net_id = net_id; instance_net.node_marker = (1<<state.node_markers[other_node_id]); instance_net.pin_offset_x[state.node_markers[other_node_id]] = db.pin_offset_x[net_pin_id]; } // I do not expect the else condition to happen } } } } } } #endif /// @brief Remove duplicate nets in an instance. 
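/* A net can be appended once per incident cell of an instance; duplicates are merged into the first occurrence (OR-ing node_marker, copying the set pin_offset_x entries) and swapped to the tail while instance_nets_size shrinks. */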
template <typename T> __global__ void unique_instance_nets(DetailedPlaceDB<T> db, KReorderState<T> state, int group_id) { __shared__ int group_size; if (threadIdx.x == 0) { group_size = state.reorder_instances.size(group_id); } __syncthreads(); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < group_size; i += blockDim.x * gridDim.x) { int inst_id = i; auto inst = state.reorder_instances(group_id, inst_id); auto instance_nets = state.instance_nets + inst_id*MAX_NUM_NETS_PER_INSTANCE; auto& instance_nets_size = state.instance_nets_size[inst_id]; for (int j = 0; j < instance_nets_size; ++j) { for (int k = j+1; k < instance_nets_size; ) { if (instance_nets[j].net_id == instance_nets[k].net_id) { // copy marker and pin offset instance_nets[j].node_marker |= instance_nets[k].node_marker; for (int l = 0; l < state.K; ++l) { if ((instance_nets[k].node_marker & (1<<l))) { instance_nets[j].pin_offset_x[l] = instance_nets[k].pin_offset_x[l]; } } --instance_nets_size; thrust::swap(instance_nets[k], instance_nets[instance_nets_size]); } else { ++k; } } } } } template <typename StateType> __global__ void print_costs(StateType state, int group_id, int offset) { if (blockIdx.x == 0 && threadIdx.x == 0) { printf("group_id %d, offset %d, %s\n", group_id, offset, __func__); for (int i = 0; i < state.reorder_instances.size(group_id); ++i) { printf("inst[%d][%d] costs: ", i, state.num_permutations); for (int j = 0; j < state.num_permutations; ++j) { printf("%g ", state.costs[i*state.num_permutations + j]); } printf("\n"); } } } template <typename StateType> __global__ void print_best_permute_id(StateType state, int group_id, int offset) { if (blockIdx.x == 0 && threadIdx.x == 0) { printf("group_id %d, offset %d, %s\n", group_id, offset, __func__); for (int i = 0; i < state.reorder_instances.size(group_id); ++i) { printf("[%d] = %d\n", i, state.best_permute_id[i]); } } } template <typename StateType> __global__ void print_instance_nets(StateType state, int group_id, int offset) { assert(blockDim.x == 1 && gridDim.x == 1); if (blockIdx.x == 0 && threadIdx.x == 0) { printf("group_id %d, offset %d, %s\n", group_id, offset, __func__); int size = state.reorder_instances.size(group_id); assert(size >= 0 && size < state.reorder_instances.size2); for (int i = 0; i < size; ++i) { int instance_nets_size = state.instance_nets_size[i]; printf("inst[%d][%d] nets: ", i, instance_nets_size); assert(instance_nets_size >= 0 && instance_nets_size < MAX_NUM_NETS_PER_INSTANCE); for (int j = 0; j < instance_nets_size; ++j) { int index = i*MAX_NUM_NETS_PER_INSTANCE + j; assert(index >= 0 && index < state.reorder_instances.size2*MAX_NUM_NETS_PER_INSTANCE); printf("%d (%d) ", state.instance_nets[index].net_id, state.instance_nets[index].node_marker); } printf("\n"); } } } template <typename StateType> __global__ void print_instance_net_bboxes(StateType state, int group_id, int offset) { assert(blockDim.x == 1 && gridDim.x == 1); if (blockIdx.x == 0 && threadIdx.x == 0) { printf("group_id %d, offset %d, %s\n", group_id, offset, __func__); int size = state.reorder_instances.size(group_id); assert(size >= 0 && size < state.reorder_instances.size2); for (int i = 0; i < size; ++i) { int instance_nets_size = state.instance_nets_size[i]; printf("inst[%d][%d] nets: ", i, instance_nets_size); assert(instance_nets_size >= 0 && instance_nets_size < MAX_NUM_NETS_PER_INSTANCE); for (int j = 0; j < instance_nets_size; ++j) { int index = i*MAX_NUM_NETS_PER_INSTANCE + j; assert(index >= 0 && index < 
state.reorder_instances.size2*MAX_NUM_NETS_PER_INSTANCE); printf("%d/%d:%g/%g ", index, j, state.instance_nets[index].bxl, state.instance_nets[index].bxh); } printf("\n"); } } } template <typename DetailedPlaceDBType, typename StateType> __global__ void check_instance_nets(DetailedPlaceDBType db, StateType state, int group_id) { if (blockIdx.x == 0 && threadIdx.x == 0) { for (int i = 0; i < state.reorder_instances.size(group_id); ++i) { auto const& inst = state.reorder_instances(group_id, i); auto row2nodes = state.row2node_map(inst.row_id) + inst.idx_bgn; int K = inst.idx_end-inst.idx_bgn; for (int j = 0; j < K; ++j) { int node_id = row2nodes[j]; int node2pin_id = db.flat_node2pin_start_map[node_id]; const int node2pin_id_end = db.flat_node2pin_start_map[node_id+1]; for (; node2pin_id < node2pin_id_end; ++node2pin_id) { int node_pin_id = db.flat_node2pin_map[node2pin_id]; int net_id = db.pin2net_map[node_pin_id]; if (db.net_mask[net_id]) { bool found = false; for (int k = 0; k < state.instance_nets_size[i]; ++k) { auto const& instance_net = state.instance_nets[i*MAX_NUM_NETS_PER_INSTANCE + k]; if (instance_net.net_id == net_id) { found = true; assert((instance_net.node_marker & (1<<j))); assert(instance_net.pin_offset_x[j] == db.pin_offset_x[node_pin_id]); break; } } assert(found); } } } } } } template <typename DetailedPlaceDBType> __global__ void print_pos(DetailedPlaceDBType db, int group_id, int offset) { if (blockIdx.x == 0 && threadIdx.x == 0) { printf("group_id %d, offset %d, pos[%d]\n", group_id, offset, db.num_movable_nodes); for (int i = 0; i < db.num_movable_nodes; ++i) { printf("[%d] = %g, %g\n", i, db.x[i], db.y[i]); } } } template <typename DetailedPlaceDBType, typename StateType> __global__ void reset_state(DetailedPlaceDBType db, StateType state, int group_id) { typedef typename DetailedPlaceDBType::type T; __shared__ int group_size; __shared__ int group_size_with_permutation; if (threadIdx.x == 0) { group_size = state.reorder_instances.size(group_id); group_size_with_permutation = group_size*state.num_permutations; } __syncthreads(); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < group_size_with_permutation; i += blockDim.x * gridDim.x) { state.costs[i] = cuda::numeric_limits<T>::max(); } for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < state.reorder_instances.size2; i += blockDim.x * gridDim.x) { state.instance_nets_size[i] = 0; } for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < db.num_nodes; i += blockDim.x * gridDim.x) { state.node_markers[i] = 0; state.node2inst_map[i] = cuda::numeric_limits<int>::max(); } for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < db.num_nets; i += blockDim.x * gridDim.x) { state.net_markers[i] = 0; } int instance_nets_size = state.reorder_instances.size2*MAX_NUM_NETS_PER_INSTANCE; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < instance_nets_size; i += blockDim.x * gridDim.x) { auto& instance_net = state.instance_nets[i]; instance_net.net_id = cuda::numeric_limits<int>::max(); instance_net.node_marker = 0; instance_net.bxl = db.xh; instance_net.bxh = db.xl; for (int j = 0; j < MAX_K; ++j) { instance_net.pin_offset_x[j] = 0; } } } #ifdef DYNAMIC #define CEILDIV CUDACeilDiv #else #define CEILDIV CPUCeilDiv #endif template <typename T> #ifdef DYNAMIC __global__ void k_reorder(DetailedPlaceDB<T> db, KReorderState<T> state) #else void k_reorder(DetailedPlaceDB<T>& db, KReorderState<T>& state, const std::vector<std::vector<KReorderInstance> >& host_reorder_instances) #endif { #ifdef TIMER hr_clock_rep 
timer_start, timer_stop; hr_clock_rep enumeration_time = 0, apply_reorder_time = 0; int enumeration_runs = 0, apply_reorder_runs = 0; #endif for (int group_id = 0; group_id < state.reorder_instances.size1; ++group_id) { #ifdef DYNAMIC int group_size = state.reorder_instances.size(group_id); #else dreamplaceAssert(state.reorder_instances.size1 == host_reorder_instances.size()); int group_size = host_reorder_instances[group_id].size(); #endif if (group_size) { for (int offset = 0; offset < state.K; offset += state.K/2) { #ifdef TIMER timer_start = get_globaltime(); #endif hipLaunchKernelGGL(( reset_state), dim3(64), dim3(512), 0, 0, db, state, group_id); hipLaunchKernelGGL(( compute_node2inst_map), dim3(CEILDIV(group_size, 256)), dim3(256), 0, 0, db, state, group_id, offset); hipLaunchKernelGGL(( compute_net_markers), dim3(CEILDIV(db.num_movable_nodes, 256)), dim3(256), 0, 0, db, state); //print_net_markers<<<1, 1>>>(db, state); #ifdef DETERMINISTIC hipLaunchKernelGGL(( compute_instance_nets), dim3(CEILDIV(group_size, 256)), dim3(256), 0, 0, db, state, group_id, offset); #else hipLaunchKernelGGL(( compute_instance_nets), dim3(CEILDIV(db.num_nets, 256)), dim3(256), 0, 0, db, state); #endif //print_instance_nets<<<1, 1>>>(state, group_id); hipLaunchKernelGGL(( unique_instance_nets), dim3(CEILDIV(group_size, 256)), dim3(256), 0, 0, db, state, group_id); //print_instance_nets<<<1, 1>>>(state, group_id, offset); //check_instance_nets<<<1, 1>>>(db, state, group_id); hipLaunchKernelGGL(( compute_instance_net_boxes), dim3(CEILDIV(group_size, 256)), dim3(256), 0, 0, db, state, group_id, offset); //print_instance_net_bboxes<<<1, 1>>>(state, group_id, offset); hipLaunchKernelGGL(( compute_reorder_hpwl), dim3(CEILDIV(group_size, 256)), dim3(256), 0, 0, db, state, group_id, offset); #ifdef TIMER checkCUDA(hipDeviceSynchronize()); timer_stop = get_globaltime(); enumeration_time += timer_stop-timer_start; enumeration_runs += 1; #endif #ifdef TIMER timer_start = get_globaltime(); #endif //print_costs<<<1, 1>>>(state, group_id, offset); hipLaunchKernelGGL(( reduce_min_2d_cub<T, 32>), dim3(group_size), dim3(32), 0, 0, state.costs, state.best_permute_id, group_size, state.num_permutations); //print_best_permute_id<<<1, 1>>>(state, group_id, offset); hipLaunchKernelGGL(( apply_reorder), dim3(CEILDIV(group_size, 256)), dim3(256), 0, 0, db, state, group_id, offset); #ifdef TIMER checkCUDA(hipDeviceSynchronize()); timer_stop = get_globaltime(); apply_reorder_time += timer_stop-timer_start; apply_reorder_runs += 1; #endif //print_pos<<<1, 1>>>(db, group_id, offset); } } } #ifdef TIMER dreamplacePrint(kDEBUG, "enumeration takes %g ms for %d runs, average %g ms\n", get_timer_period()*enumeration_time, enumeration_runs, get_timer_period()*enumeration_time/enumeration_runs); dreamplacePrint(kDEBUG, "apply_reorder takes %g ms for %d runs, average %g ms\n", get_timer_period()*apply_reorder_time, apply_reorder_runs, get_timer_period()*apply_reorder_time/apply_reorder_runs); #endif } template <typename T> int kreorderCUDALauncher(DetailedPlaceDB<T> db, int K, int max_iters, int num_threads) { dreamplacePrint(kDEBUG, "%d-reorder\n", K); hr_clock_rep total_time_start, total_time_stop; hr_clock_rep kernel_time_start, kernel_time_stop; hr_clock_rep iter_time_start, iter_time_stop; total_time_start = get_globaltime(); const float stop_threshold = 0.1/100; // fix random seed std::srand(1000); KReorderState<T> state; DetailedPlaceDB<T> cpu_db; state.K = K; // distribute cells to rows on host // copy cell locations from device to 
host std::vector<std::vector<int> > host_row2node_map (db.num_sites_y); std::vector<T> host_node_space_x (db.num_movable_nodes); std::vector<std::vector<int> > host_permutations = quick_perm(K); std::vector<unsigned char> host_adjacency_matrix; std::vector<std::vector<int> > host_row_graph; std::vector<std::vector<int> > host_independent_rows; std::vector<std::vector<KReorderInstance> > host_reorder_instances; dreamplacePrint(kDEBUG, "%lu permutations\n", host_permutations.size()); // initialize cpu db from db { iter_time_start = get_globaltime(); cpu_db.xl = db.xl; cpu_db.yl = db.yl; cpu_db.xh = db.xh; cpu_db.yh = db.yh; cpu_db.site_width = db.site_width; cpu_db.row_height = db.row_height; cpu_db.bin_size_x = db.bin_size_x; cpu_db.bin_size_y = db.bin_size_y; cpu_db.num_bins_x = db.num_bins_x; cpu_db.num_bins_y = db.num_bins_y; cpu_db.num_sites_x = db.num_sites_x; cpu_db.num_sites_y = db.num_sites_y; cpu_db.num_nodes = db.num_nodes; cpu_db.num_movable_nodes = db.num_movable_nodes; cpu_db.num_nets = db.num_nets; cpu_db.num_pins = db.num_pins; allocateCopyCPU(cpu_db.net_mask, db.net_mask, db.num_nets, unsigned char); allocateCopyCPU(cpu_db.flat_net2pin_start_map, db.flat_net2pin_start_map, db.num_nets+1, int); allocateCopyCPU(cpu_db.flat_net2pin_map, db.flat_net2pin_map, db.num_pins, int); allocateCopyCPU(cpu_db.pin2node_map, db.pin2node_map, db.num_pins, int); allocateCopyCPU(cpu_db.x, db.x, db.num_nodes, T); allocateCopyCPU(cpu_db.y, db.y, db.num_nodes, T); allocateCopyCPU(cpu_db.node_size_x, db.node_size_x, db.num_nodes, T); allocateCopyCPU(cpu_db.node_size_y, db.node_size_y, db.num_nodes, T); make_row2node_map(cpu_db, cpu_db.x, cpu_db.y, host_row2node_map, num_threads); host_node_space_x.resize(cpu_db.num_movable_nodes); for (int i = 0; i < cpu_db.num_sites_y; ++i) { for (unsigned int j = 0; j < host_row2node_map.at(i).size(); ++j) { int node_id = host_row2node_map[i][j]; if (node_id < db.num_movable_nodes) { auto& space = host_node_space_x[node_id]; T space_xl = cpu_db.x[node_id]; T space_xh = cpu_db.xh; if (j+1 < host_row2node_map[i].size()) { int right_node_id = host_row2node_map[i][j+1]; space_xh = min(space_xh, cpu_db.x[right_node_id]); } space = space_xh-space_xl; // align space to sites, as I assume space_xl aligns to sites // I also assume node width should be integral numbers of sites space = floor(space / db.site_width) * db.site_width; T node_size_x = cpu_db.node_size_x[node_id]; dreamplaceAssertMsg(space >= node_size_x, "space %g, node_size_x[%d] %g, original space (%g, %g), site_width %g", space, node_id, node_size_x, space_xl, space_xh, db.site_width); } #ifdef DEBUG if (node_id < cpu_db.num_movable_nodes) { dreamplaceAssert(space >= cpu_db.node_size_x[node_id]); } #endif } } iter_time_stop = get_globaltime(); dreamplacePrint(kDEBUG, "initializing CPU DB takes %g ms\n", get_timer_period()*(iter_time_stop-iter_time_start)); iter_time_start = get_globaltime(); compute_row_conflict_graph(cpu_db, host_row2node_map, host_adjacency_matrix, host_row_graph, num_threads); compute_independent_rows(cpu_db, host_row_graph, host_independent_rows); iter_time_stop = get_globaltime(); dreamplacePrint(kDEBUG, "computing independent rows takes %g ms\n", get_timer_period()*(iter_time_stop-iter_time_start)); iter_time_start = get_globaltime(); compute_reorder_instances(cpu_db, host_row2node_map, host_independent_rows, host_reorder_instances, state.K); iter_time_stop = get_globaltime(); dreamplacePrint(kDEBUG, "computing reorder instances takes %g ms\n", 
get_timer_period()*(iter_time_stop-iter_time_start)); } // initialize cuda state iter_time_start = get_globaltime(); { allocateCopyCUDA(state.node_space_x, host_node_space_x.data(), db.num_movable_nodes); std::vector<int> host_permutations_flat (host_permutations.size()*K); for (unsigned int i = 0; i < host_permutations.size(); ++i) { std::copy(host_permutations[i].begin(), host_permutations[i].end(), host_permutations_flat.begin() + i*K); } state.num_permutations = host_permutations.size(); allocateCopyCUDA(state.permutations, host_permutations_flat.data(), state.num_permutations*state.K); state.row2node_map.initialize(host_row2node_map); state.reorder_instances.initialize(host_reorder_instances); allocateCUDA(state.costs, state.reorder_instances.size2*state.num_permutations, T); allocateCUDA(state.best_permute_id, state.reorder_instances.size2, int); allocateCUDA(state.instance_nets, state.reorder_instances.size2*MAX_NUM_NETS_PER_INSTANCE, InstanceNet<T>); allocateCUDA(state.instance_nets_size, state.reorder_instances.size2, int); allocateCUDA(state.node2inst_map, db.num_nodes, int); allocateCUDA(state.net_markers, db.num_nets, int); allocateCUDA(state.node_markers, db.num_nodes, unsigned char); allocateCUDA(state.device_num_moved, 1, int); allocateCUDA(state.net_hpwls, db.num_nets, long); } iter_time_stop = get_globaltime(); dreamplacePrint(kDEBUG, "initializing CUDA state takes %g ms\n", get_timer_period()*(iter_time_stop-iter_time_start)); kernel_time_start = get_globaltime(); double hpwls [max_iters+1]; hpwls[0] = compute_total_hpwl(db, db.x, db.y, state.net_hpwls); dreamplacePrint(kINFO, "initial hpwl = %.3f\n", hpwls[0]); for (int iter = 0; iter < max_iters; ++iter) { iter_time_start = get_globaltime(); #ifdef DYNAMIC hipLaunchKernelGGL(( k_reorder), dim3(1), dim3(1), 0, 0, db, state); #else k_reorder(db, state, host_reorder_instances); #endif checkCUDA(hipDeviceSynchronize()); iter_time_stop = get_globaltime(); dreamplacePrint(kINFO, "Iteration time(ms) \t %g\n", get_timer_period() * (iter_time_stop - iter_time_start)); hpwls[iter+1] = compute_total_hpwl(db, db.x, db.y, state.net_hpwls); dreamplacePrint(kINFO, "iteration %d: hpwl %.3f => %.3f (imp.
%g%%)\n", iter, hpwls[0], hpwls[iter+1], (1.0-hpwls[iter+1]/(double)hpwls[0])*100); if ((iter&1) && hpwls[iter]-hpwls[iter-1] > -stop_threshold*hpwls[0]) { break; } } checkCUDA(hipDeviceSynchronize()); kernel_time_stop = get_globaltime(); // destroy cuda state iter_time_start = get_globaltime(); { destroyCUDA(state.node_space_x); destroyCUDA(state.permutations); state.row2node_map.destroy(); state.reorder_instances.destroy(); destroyCUDA(state.costs); destroyCUDA(state.best_permute_id); destroyCUDA(state.instance_nets); destroyCUDA(state.instance_nets_size); destroyCUDA(state.node2inst_map); destroyCUDA(state.net_markers); destroyCUDA(state.node_markers); destroyCUDA(state.device_num_moved); destroyCUDA(state.net_hpwls); } iter_time_stop = get_globaltime(); dreamplacePrint(kDEBUG, "destroying CUDA state takes %g ms\n", get_timer_period()*(iter_time_stop-iter_time_start)); total_time_stop = get_globaltime(); // destroy cpu db iter_time_start = get_globaltime(); { destroyCPU(cpu_db.net_mask); destroyCPU(cpu_db.flat_net2pin_start_map); destroyCPU(cpu_db.flat_net2pin_map); destroyCPU(cpu_db.pin2node_map); destroyCPU(cpu_db.x); destroyCPU(cpu_db.y); destroyCPU(cpu_db.node_size_x); destroyCPU(cpu_db.node_size_y); } iter_time_stop = get_globaltime(); dreamplacePrint(kDEBUG, "destroying CPU state takes %g ms\n", get_timer_period()*(iter_time_stop-iter_time_start)); dreamplacePrint(kINFO, "Kernel time: %g ms\n", get_timer_period()*(kernel_time_stop-kernel_time_start)); dreamplacePrint(kINFO, "K-reorder time: %g ms\n", get_timer_period()*(total_time_stop-total_time_start)); return 0; } #define REGISTER_KERNEL_LAUNCHER(T) \ void instantiateKReorderCUDALauncher(\ DetailedPlaceDB<T> db, \ int K, \ int max_iters, \ int num_threads \ )\ {\ kreorderCUDALauncher<T>(\ db, \ K, \ max_iters, \ num_threads \ );\ } REGISTER_KERNEL_LAUNCHER(int); REGISTER_KERNEL_LAUNCHER(float); REGISTER_KERNEL_LAUNCHER(double); DREAMPLACE_END_NAMESPACE
87430ba0549407b41412a5c3a5917e893ecf15a4.cu
/** * @file k_reorder_cuda_kernel.cu * @author Yibo Lin * @date Jan 2019 */ #include <stdlib.h> #include <stdio.h> #include <time.h> #include <random> #include <assert.h> #include <chrono> #include <cmath> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/swap.h> #include <thrust/reduce.h> #include <thrust/functional.h> //#define DEBUG //#define DYNAMIC //#define TIMER #define DETERMINISTIC #include "utility/src/Msg.h" #include "utility/src/utils.cuh" #include "utility/src/limits.cuh" #include "utility/src/DetailedPlaceDB.cuh" #include "utility/src/FlatNestedVector.cuh" #include "utility/src/PitchNestedVector.cuh" #include "k_reorder/src/quick_perm.h" #include "k_reorder/src/row2node_map.h" #include "k_reorder/src/compute_independent_rows.h" #include "k_reorder/src/compute_reorder_instances.h" #include "cub/cub.cuh" DREAMPLACE_BEGIN_NAMESPACE // maximum number of cells for reordering #define MAX_K 4 // maximum number of nets per cell to be considered #define MAX_NUM_NETS_PER_NODE 20 // maximum number of nets incident to cells per instance #define MAX_NUM_NETS_PER_INSTANCE ( MAX_NUM_NETS_PER_NODE * MAX_K ) /// Concepts in the algorith: /// A group contains independent rows. /// An KReorderInstance contains an adjacent sequence of cells to be solved. /// /// a net for a reorder instance template <typename T> struct InstanceNet { int net_id; int node_marker; ///< mark cells in one instance using bit T bxl; T bxh; T pin_offset_x[MAX_K]; }; template <typename T> struct KReorderState { PitchNestedVector<int> row2node_map; int* permutations; ///< num_permutations x K int num_permutations; T* node_space_x; ///< cell size with spaces, a cell only considers its right space PitchNestedVector<KReorderInstance> reorder_instances; ///< array of array for independent instances; each instance is a sequence of at most K cells to be solved. 
T* costs; ///< maximum reorder_instances.size2 * num_permutations int* best_permute_id; ///< maximum reorder_instances.size2 InstanceNet<T>* instance_nets; ///< reorder_instances.size2 * MAX_NUM_NETS_PER_INSTANCE int* instance_nets_size; ///< reorder_instances.size2, number of nets for each instance int* node2inst_map; ///< map cell to instance int* net_markers; ///< whether a net is in this group unsigned char* node_markers; ///< cell offset in instance int* device_num_moved; int K; ///< number of cells to reorder double* net_hpwls; ///< used for compute HPWL }; template <typename DetailedPlaceDBType, typename StateType> inline __device__ void compute_position( const DetailedPlaceDBType& db, const StateType& state, const KReorderInstance& inst, int permute_id, typename DetailedPlaceDBType::type target_x[], typename DetailedPlaceDBType::type target_sizes[] ) { auto row2nodes = state.row2node_map(inst.row_id) + inst.idx_bgn; auto permutation = state.permutations + permute_id*state.K; int K = inst.idx_end - inst.idx_bgn; #ifdef DEBUG if (!(inst.idx_end-inst.idx_bgn <= state.row2node_map.size(inst.row_id))) { printf("idx_bgn %d, idx_end %d, row size %u\n", inst.idx_bgn, inst.idx_end, state.row2node_map.size(inst.row_id)); } assert(inst.idx_end-inst.idx_bgn <= state.row2node_map.size(inst.row_id)); assert(K <= MAX_K); #endif // find left boundary if (K) { int node_id = row2nodes[0]; target_x[0] = db.x[node_id]; } // record sizes, and pack to left for (int i = 0; i < K; ++i) { int node_id = row2nodes[i]; assert(node_id < db.num_movable_nodes); target_sizes[permutation[i]] = state.node_space_x[node_id]; } for (int i = 1; i < K; ++i) { target_x[i] = target_x[i-1] + target_sizes[i-1]; } } template <typename DetailedPlaceDBType, typename StateType> inline __device__ typename DetailedPlaceDBType::type compute_instance_hpwl( const DetailedPlaceDBType& db, const StateType& state, const KReorderInstance& inst, int permute_id, const int* __restrict__ row2nodes, int K, typename DetailedPlaceDBType::type target_x[], typename DetailedPlaceDBType::type target_sizes[] ) { typedef typename DetailedPlaceDBType::type T; auto check_node_exist = [&](int bgn, int end, int node_id){ for (int i = bgn; i < end; ++i) { if (row2nodes[i] == node_id) { return i; } } return cuda::numeric_limits<int>::max(); }; T row_yl = db.yl + inst.row_id*db.row_height; auto permutation = state.permutations + permute_id*state.K; T cost = 0; for (int i = 0; i < K; ++i) { int node_id = row2nodes[i]; int node2pin_id = db.flat_node2pin_start_map[node_id]; const int node2pin_id_end = db.flat_node2pin_start_map[node_id+1]; for (; node2pin_id < node2pin_id_end; ++node2pin_id) { int node_pin_id = db.flat_node2pin_map[node2pin_id]; int net_id = db.pin2net_map[node_pin_id]; T bxl = db.xh; T bxh = db.xl; int flag = db.net_mask[net_id]; if (flag) { int net2pin_id = db.flat_net2pin_start_map[net_id]; const int net2pin_id_end = db.flat_net2pin_start_map[net_id+1]*flag; for (; net2pin_id < net2pin_id_end; ++net2pin_id) { int net_pin_id = db.flat_net2pin_map[net2pin_id]; int other_node_id = db.pin2node_map[net_pin_id]; // if other node found in previous // this net has already been computed int other_node_found = check_node_exist(0, K, other_node_id); if (other_node_found < i) { break; } T other_node_xl; if (other_node_found < K) { int permuted_offset = permutation[other_node_found]; other_node_xl = target_x[permuted_offset]; } else // not found { other_node_xl = db.x[other_node_id]; if (db.y[other_node_id] == row_yl) // in the same row { if 
(other_node_xl < target_x[0]) // left of the segment { other_node_xl = db.xl; } else if (other_node_xl > target_x[K-1]) // right of the segment { other_node_xl = db.xh; } } } other_node_xl += db.pin_offset_x[net_pin_id]; bxl = min(bxl, other_node_xl); bxh = max(bxh, other_node_xl); } cost += bxh-bxl; } } } return cost; } template <typename DetailedPlaceDBType, typename StateType> __global__ void compute_instance_net_boxes( DetailedPlaceDBType db, StateType state, int group_id, int offset ) { typedef typename DetailedPlaceDBType::type T; __shared__ int group_size; if (threadIdx.x == 0) { group_size = state.reorder_instances.size(group_id); } __syncthreads(); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < group_size; i += blockDim.x * gridDim.x) { int inst_id = i; // this is a copy auto inst = state.reorder_instances(group_id, inst_id); inst.idx_bgn += offset; inst.idx_end = min(inst.idx_end+offset, state.row2node_map.size(inst.row_id)); auto row2nodes = state.row2node_map(inst.row_id) + inst.idx_bgn; int K = inst.idx_end-inst.idx_bgn; // after adding offset for (int idx = 0; idx < K; ++idx) { int node_id = row2nodes[idx]; if (node_id >= db.num_movable_nodes || db.node_size_y[node_id] > db.row_height) { inst.idx_end = inst.idx_bgn+idx; K = idx; break; } } if (K > 0) { T segment_xl = db.x[row2nodes[0]]; T segment_xh = db.x[row2nodes[K-1]]; T row_yl = db.yl + inst.row_id * db.row_height; auto instance_nets = state.instance_nets + inst_id*MAX_NUM_NETS_PER_INSTANCE; auto instance_nets_size = state.instance_nets_size[inst_id]; for (int idx = 0; idx < instance_nets_size; ++idx) { auto& instance_net = instance_nets[idx]; instance_net.bxl = db.xh; instance_net.bxh = db.xl; int net2pin_id = db.flat_net2pin_start_map[instance_net.net_id]; const int net2pin_id_end = db.flat_net2pin_start_map[instance_net.net_id+1]; for (; net2pin_id < net2pin_id_end; ++net2pin_id) { int net_pin_id = db.flat_net2pin_map[net2pin_id]; int other_node_id = db.pin2node_map[net_pin_id]; if (other_node_id < db.num_nodes) // other_node_id may exceed db.num_nodes like IO pins { int other_node_found = (state.node2inst_map[other_node_id] == inst_id); if (!other_node_found) // not found { T other_node_xl = db.x[other_node_id]; auto pin_offset_x = db.pin_offset_x[net_pin_id]; if (abs(db.y[other_node_id]-row_yl) < db.row_height) // in the same row { if (other_node_xl < segment_xl) // left of the segment { other_node_xl = db.xl; } else if (other_node_xl > segment_xh) // right of the segment { other_node_xl = db.xh; } } other_node_xl += pin_offset_x; instance_net.bxl = min(instance_net.bxl, other_node_xl); instance_net.bxh = max(instance_net.bxh, other_node_xl); } } } } } } } template <typename DetailedPlaceDBType, typename StateType> __global__ void compute_reorder_hpwl( DetailedPlaceDBType db, StateType state, int group_id, int offset ) { typedef typename DetailedPlaceDBType::type T; __shared__ int group_size; __shared__ int group_size_with_permutation; if (threadIdx.x == 0) { group_size = state.reorder_instances.size(group_id); group_size_with_permutation = group_size*state.num_permutations; } __syncthreads(); typename DetailedPlaceDBType::type target_x[MAX_K]; typename DetailedPlaceDBType::type target_sizes[MAX_K]; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < group_size_with_permutation; i += blockDim.x * gridDim.x) { int inst_id = i/state.num_permutations; int permute_id = i - inst_id*state.num_permutations; // this is a copy auto inst = state.reorder_instances(group_id, inst_id); inst.idx_bgn += offset; 
inst.idx_end = min(inst.idx_end+offset, state.row2node_map.size(inst.row_id)); auto row2nodes = state.row2node_map(inst.row_id) + inst.idx_bgn; auto permutation = state.permutations + permute_id*state.K; int K = inst.idx_end-inst.idx_bgn; // after adding offset for (int idx = 0; idx < K; ++idx) { int node_id = row2nodes[idx]; if (node_id >= db.num_movable_nodes || db.node_size_y[node_id] > db.row_height) { inst.idx_end = inst.idx_bgn+idx; K = idx; break; } } int valid_flag = (K > 0); for (int idx = 0; idx < K; ++idx) { if (permutation[idx] >= K) { valid_flag = 0; break; } } if (valid_flag) { compute_position(db, state, inst, permute_id, target_x, target_sizes); //state.costs[i] = compute_instance_hpwl( // db, // state, // inst, // permute_id, // row2nodes, // K, // target_x, // target_sizes // ); T cost = 0; // consider FENCE region if (db.num_regions) { for (int idx = 0; idx < K; ++idx) { int node_id = row2nodes[idx]; int permuted_offset = permutation[idx]; T node_xl = target_x[permuted_offset]; T node_yl = db.y[node_id]; if (!db.inside_fence(node_id, node_xl, node_yl)) { cost = cuda::numeric_limits<T>::max(); break; } } } if (cost == 0) { auto instance_nets = state.instance_nets + inst_id*MAX_NUM_NETS_PER_INSTANCE; auto const& instance_nets_size = state.instance_nets_size[inst_id]; for (int idx = 0; idx < instance_nets_size; ++idx) { auto& instance_net = instance_nets[idx]; T bxl = instance_net.bxl; T bxh = instance_net.bxh; for (int j = 0; j < K; ++j) { int flag = (1<<j); if ((instance_net.node_marker & flag)) { int permuted_offset = permutation[j]; T other_node_xl = target_x[permuted_offset]; other_node_xl += instance_net.pin_offset_x[j]; bxl = min(bxl, other_node_xl); bxh = max(bxh, other_node_xl); } } cost += bxh-bxl; } } state.costs[i] = cost; } } } template <typename T> struct ItemWithIndex { T value; int index; }; template <typename T> struct ReduceMinOP { __host__ __device__ ItemWithIndex<T> operator()(const ItemWithIndex<T>& a, const ItemWithIndex<T>& b) const { return (a.value < b.value)? 
a : b; } }; template <typename T, int ThreadsPerBlock=32> __global__ void reduce_min_2d_cub(const T* __restrict__ costs, int* best_permute_id, int m, int n) { typedef cub::BlockReduce<ItemWithIndex<T>, ThreadsPerBlock> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; auto inst_costs = costs + blockIdx.x*n; auto inst_best_permute_id = best_permute_id + blockIdx.x; ItemWithIndex<T> thread_data; thread_data.value = cuda::numeric_limits<T>::max(); thread_data.index = 0; for (int col = threadIdx.x; col < n; col += ThreadsPerBlock) { T cost = inst_costs[col]; if (cost < thread_data.value) { thread_data.value = cost; thread_data.index = col; } } __syncthreads(); // Compute the block-wide max for thread0 ItemWithIndex<T> aggregate = BlockReduce(temp_storage).Reduce(thread_data, ReduceMinOP<T>(), n); __syncthreads(); if (threadIdx.x == 0) { //printf("inst[%d] cost %g, permute_id %d\n", blockIdx.x, aggregate.value, aggregate.index); *inst_best_permute_id = aggregate.index; } } template <typename DetailedPlaceDBType, typename StateType> __global__ void apply_reorder( DetailedPlaceDBType db, StateType state, int group_id, int offset ) { __shared__ int group_size; if (threadIdx.x == 0) { group_size = state.reorder_instances.size(group_id); } __syncthreads(); typename DetailedPlaceDBType::type target_x[MAX_K]; typename DetailedPlaceDBType::type target_sizes[MAX_K]; int target_nodes[MAX_K]; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < group_size; i += blockDim.x * gridDim.x) { int inst_id = i; int permute_id = state.best_permute_id[i]; #ifdef DEBUG //printf("inst[%d].permute_id = %d\n", inst_id, permute_id); assert(permute_id < state.num_permutations); #endif // this is a copy for adding offset auto inst = state.reorder_instances(group_id, inst_id); inst.idx_bgn += offset; inst.idx_end = min(inst.idx_end+offset, state.row2node_map.size(inst.row_id)); auto row2nodes = state.row2node_map(inst.row_id) + inst.idx_bgn; auto permutation = state.permutations + permute_id*state.K; int K = inst.idx_end-inst.idx_bgn; // after adding offset for (int idx = 0; idx < K; ++idx) { int node_id = row2nodes[idx]; if (node_id >= db.num_movable_nodes || db.node_size_y[node_id] > db.row_height) { inst.idx_end = inst.idx_bgn+idx; K = idx; break; } } if (K > 0) { compute_position(db, state, inst, permute_id, target_x, target_sizes); for (int i = 0; i < K; ++i) { int node_id = row2nodes[i]; target_nodes[i] = node_id; } for (int i = 0; i < K; ++i) { int node_id = row2nodes[i]; typename DetailedPlaceDBType::type xx = target_x[permutation[i]]; if (db.x[node_id] != xx) { atomicAdd(state.device_num_moved, 1); } db.x[node_id] = xx; } for (int i = 0; i < K; ++i) { row2nodes[permutation[i]] = target_nodes[i]; } } } } /// @brief Map each node to its instance. /// For each instance in the group /// For each node incident to the instance /// update node2inst_map /// update node_markers /// Every time, we solve one group with all independent instances in the group. /// For sliding window, offset can be different during iterations, /// so node2inst_map and node_markers need to be recomputed. 
template <typename T> __global__ void compute_node2inst_map(DetailedPlaceDB<T> db, KReorderState<T> state, int group_id, int offset) { __shared__ int group_size; if (threadIdx.x == 0) { group_size = state.reorder_instances.size(group_id); } __syncthreads(); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < group_size; i += blockDim.x * gridDim.x) { int inst_id = i; // this is a copy auto inst = state.reorder_instances(group_id, inst_id); inst.idx_bgn += offset; inst.idx_end = min(inst.idx_end+offset, state.row2node_map.size(inst.row_id)); auto row2nodes = state.row2node_map(inst.row_id) + inst.idx_bgn; int K = inst.idx_end-inst.idx_bgn; for (int j = 0; j < K; ++j) { int node_id = row2nodes[j]; // do not update for fixed cells if (node_id < db.num_movable_nodes) { state.node2inst_map[node_id] = inst_id; state.node_markers[node_id] = j; } } } } /// @brief Mark target nets for all instances in this group. template <typename T> __global__ void compute_net_markers(DetailedPlaceDB<T> db, KReorderState<T> state) { for (int node_id = blockIdx.x * blockDim.x + threadIdx.x; node_id < db.num_movable_nodes; node_id += blockDim.x * gridDim.x) { if (state.node2inst_map[node_id] < cuda::numeric_limits<int>::max()) { int node2pin_id = db.flat_node2pin_start_map[node_id]; const int node2pin_id_end = db.flat_node2pin_start_map[node_id+1]; for (; node2pin_id < node2pin_id_end; ++node2pin_id) { int node_pin_id = db.flat_node2pin_map[node2pin_id]; int net_id = db.pin2net_map[node_pin_id]; int flag = db.net_mask[net_id]; atomicOr(state.net_markers+net_id, flag); } } } } template <typename T> __global__ void print_net_markers(DetailedPlaceDB<T> db, KReorderState<T> state) { if (blockIdx.x == 0 && threadIdx.x == 0) { for (int i = 0; i < db.num_nets; ++i) { printf("net_markers[%d] = %d\n", i, state.net_markers[i]); } } } #ifdef DETERMINISTIC /// @brief Collect information of nets belong to each instance. /// The net order is deterministic. 
template <typename T> __global__ void compute_instance_nets(DetailedPlaceDB<T> db, KReorderState<T> state, int group_id, int offset) { __shared__ int group_size; if (threadIdx.x == 0) { group_size = state.reorder_instances.size(group_id); } __syncthreads(); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < group_size; i += blockDim.x * gridDim.x) { int inst_id = i; // this is a copy auto inst = state.reorder_instances(group_id, inst_id); inst.idx_bgn += offset; inst.idx_end = min(inst.idx_end+offset, state.row2node_map.size(inst.row_id)); const auto row2nodes = state.row2node_map(inst.row_id) + inst.idx_bgn; int K = inst.idx_end-inst.idx_bgn; auto& instance_nets_size = state.instance_nets_size[inst_id]; auto instance_nets = state.instance_nets + inst_id*MAX_NUM_NETS_PER_INSTANCE; instance_nets_size = 0; // after adding offset for (int idx = 0; idx < K; ++idx) { int node_id = row2nodes[idx]; if (node_id >= db.num_movable_nodes || db.node_size_y[node_id] > db.row_height) { inst.idx_end = inst.idx_bgn+idx; K = idx; break; } } for (int j = 0; j < K; ++j) { int node_id = row2nodes[j]; int node2pin_id = db.flat_node2pin_start_map[node_id]; int node2pin_id_end = db.flat_node2pin_start_map[node_id+1]; for (; node2pin_id < node2pin_id_end; ++node2pin_id) { int node_pin_id = db.flat_node2pin_map[node2pin_id]; int net_id = db.pin2net_map[node_pin_id]; if (state.net_markers[net_id]) { if (instance_nets_size < MAX_NUM_NETS_PER_INSTANCE) { auto& instance_net = instance_nets[instance_nets_size]; instance_net.net_id = net_id; instance_net.node_marker = (1<<state.node_markers[node_id]); instance_net.pin_offset_x[state.node_markers[node_id]] = db.pin_offset_x[node_pin_id]; instance_nets_size += 1; } } } } } } #else /// @brief Collect information of nets belong to each instance. /// The net orders may not be deterministic. template <typename T> __global__ void compute_instance_nets(DetailedPlaceDB<T> db, KReorderState<T> state) { for (int net_id = blockIdx.x * blockDim.x + threadIdx.x; net_id < db.num_nets; net_id += blockDim.x * gridDim.x) { if (state.net_markers[net_id]) { int net2pin_id = db.flat_net2pin_start_map[net_id]; const int net2pin_id_end = db.flat_net2pin_start_map[net_id+1]; for (; net2pin_id < net2pin_id_end; ++net2pin_id) { int net_pin_id = db.flat_net2pin_map[net2pin_id]; int other_node_id = db.pin2node_map[net_pin_id]; if (other_node_id < db.num_nodes) // other_node_id may exceed db.num_nodes like IO pins { int inst_id = state.node2inst_map[other_node_id]; if (inst_id < cuda::numeric_limits<int>::max()) { auto instance_nets_size = state.instance_nets_size + inst_id; int index = atomicAdd(instance_nets_size, 1); if (index < MAX_NUM_NETS_PER_INSTANCE) { auto& instance_net = state.instance_nets[inst_id*MAX_NUM_NETS_PER_INSTANCE + index]; instance_net.net_id = net_id; instance_net.node_marker = (1<<state.node_markers[other_node_id]); instance_net.pin_offset_x[state.node_markers[other_node_id]] = db.pin_offset_x[net_pin_id]; } // I do not expect the else condition to happen } } } } } } #endif /// @brief Remove duplicate nets in an instance. 
template <typename T> __global__ void unique_instance_nets(DetailedPlaceDB<T> db, KReorderState<T> state, int group_id) { __shared__ int group_size; if (threadIdx.x == 0) { group_size = state.reorder_instances.size(group_id); } __syncthreads(); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < group_size; i += blockDim.x * gridDim.x) { int inst_id = i; auto inst = state.reorder_instances(group_id, inst_id); auto instance_nets = state.instance_nets + inst_id*MAX_NUM_NETS_PER_INSTANCE; auto& instance_nets_size = state.instance_nets_size[inst_id]; for (int j = 0; j < instance_nets_size; ++j) { for (int k = j+1; k < instance_nets_size; ) { if (instance_nets[j].net_id == instance_nets[k].net_id) { // copy marker and pin offset instance_nets[j].node_marker |= instance_nets[k].node_marker; for (int l = 0; l < state.K; ++l) { if ((instance_nets[k].node_marker & (1<<l))) { instance_nets[j].pin_offset_x[l] = instance_nets[k].pin_offset_x[l]; } } --instance_nets_size; thrust::swap(instance_nets[k], instance_nets[instance_nets_size]); } else { ++k; } } } } } template <typename StateType> __global__ void print_costs(StateType state, int group_id, int offset) { if (blockIdx.x == 0 && threadIdx.x == 0) { printf("group_id %d, offset %d, %s\n", group_id, offset, __func__); for (int i = 0; i < state.reorder_instances.size(group_id); ++i) { printf("inst[%d][%d] costs: ", i, state.num_permutations); for (int j = 0; j < state.num_permutations; ++j) { printf("%g ", state.costs[i*state.num_permutations + j]); } printf("\n"); } } } template <typename StateType> __global__ void print_best_permute_id(StateType state, int group_id, int offset) { if (blockIdx.x == 0 && threadIdx.x == 0) { printf("group_id %d, offset %d, %s\n", group_id, offset, __func__); for (int i = 0; i < state.reorder_instances.size(group_id); ++i) { printf("[%d] = %d\n", i, state.best_permute_id[i]); } } } template <typename StateType> __global__ void print_instance_nets(StateType state, int group_id, int offset) { assert(blockDim.x == 1 && gridDim.x == 1); if (blockIdx.x == 0 && threadIdx.x == 0) { printf("group_id %d, offset %d, %s\n", group_id, offset, __func__); int size = state.reorder_instances.size(group_id); assert(size >= 0 && size < state.reorder_instances.size2); for (int i = 0; i < size; ++i) { int instance_nets_size = state.instance_nets_size[i]; printf("inst[%d][%d] nets: ", i, instance_nets_size); assert(instance_nets_size >= 0 && instance_nets_size < MAX_NUM_NETS_PER_INSTANCE); for (int j = 0; j < instance_nets_size; ++j) { int index = i*MAX_NUM_NETS_PER_INSTANCE + j; assert(index >= 0 && index < state.reorder_instances.size2*MAX_NUM_NETS_PER_INSTANCE); printf("%d (%d) ", state.instance_nets[index].net_id, state.instance_nets[index].node_marker); } printf("\n"); } } } template <typename StateType> __global__ void print_instance_net_bboxes(StateType state, int group_id, int offset) { assert(blockDim.x == 1 && gridDim.x == 1); if (blockIdx.x == 0 && threadIdx.x == 0) { printf("group_id %d, offset %d, %s\n", group_id, offset, __func__); int size = state.reorder_instances.size(group_id); assert(size >= 0 && size < state.reorder_instances.size2); for (int i = 0; i < size; ++i) { int instance_nets_size = state.instance_nets_size[i]; printf("inst[%d][%d] nets: ", i, instance_nets_size); assert(instance_nets_size >= 0 && instance_nets_size < MAX_NUM_NETS_PER_INSTANCE); for (int j = 0; j < instance_nets_size; ++j) { int index = i*MAX_NUM_NETS_PER_INSTANCE + j; assert(index >= 0 && index < 
state.reorder_instances.size2*MAX_NUM_NETS_PER_INSTANCE); printf("%d/%d:%g/%g ", index, j, state.instance_nets[index].bxl, state.instance_nets[index].bxh); } printf("\n"); } } } template <typename DetailedPlaceDBType, typename StateType> __global__ void check_instance_nets(DetailedPlaceDBType db, StateType state, int group_id) { if (blockIdx.x == 0 && threadIdx.x == 0) { for (int i = 0; i < state.reorder_instances.size(group_id); ++i) { auto const& inst = state.reorder_instances(group_id, i); auto row2nodes = state.row2node_map(inst.row_id) + inst.idx_bgn; int K = inst.idx_end-inst.idx_bgn; for (int j = 0; j < K; ++j) { int node_id = row2nodes[j]; int node2pin_id = db.flat_node2pin_start_map[node_id]; const int node2pin_id_end = db.flat_node2pin_start_map[node_id+1]; for (; node2pin_id < node2pin_id_end; ++node2pin_id) { int node_pin_id = db.flat_node2pin_map[node2pin_id]; int net_id = db.pin2net_map[node_pin_id]; if (db.net_mask[net_id]) { bool found = false; for (int k = 0; k < state.instance_nets_size[i]; ++k) { auto const& instance_net = state.instance_nets[i*MAX_NUM_NETS_PER_INSTANCE + k]; if (instance_net.net_id == net_id) { found = true; assert((instance_net.node_marker & (1<<j))); assert(instance_net.pin_offset_x[j] == db.pin_offset_x[node_pin_id]); break; } } assert(found); } } } } } } template <typename DetailedPlaceDBType> __global__ void print_pos(DetailedPlaceDBType db, int group_id, int offset) { if (blockIdx.x == 0 && threadIdx.x == 0) { printf("group_id %d, offset %d, pos[%d]\n", group_id, offset, db.num_movable_nodes); for (int i = 0; i < db.num_movable_nodes; ++i) { printf("[%d] = %g, %g\n", i, db.x[i], db.y[i]); } } } template <typename DetailedPlaceDBType, typename StateType> __global__ void reset_state(DetailedPlaceDBType db, StateType state, int group_id) { typedef typename DetailedPlaceDBType::type T; __shared__ int group_size; __shared__ int group_size_with_permutation; if (threadIdx.x == 0) { group_size = state.reorder_instances.size(group_id); group_size_with_permutation = group_size*state.num_permutations; } __syncthreads(); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < group_size_with_permutation; i += blockDim.x * gridDim.x) { state.costs[i] = cuda::numeric_limits<T>::max(); } for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < state.reorder_instances.size2; i += blockDim.x * gridDim.x) { state.instance_nets_size[i] = 0; } for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < db.num_nodes; i += blockDim.x * gridDim.x) { state.node_markers[i] = 0; state.node2inst_map[i] = cuda::numeric_limits<int>::max(); } for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < db.num_nets; i += blockDim.x * gridDim.x) { state.net_markers[i] = 0; } int instance_nets_size = state.reorder_instances.size2*MAX_NUM_NETS_PER_INSTANCE; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < instance_nets_size; i += blockDim.x * gridDim.x) { auto& instance_net = state.instance_nets[i]; instance_net.net_id = cuda::numeric_limits<int>::max(); instance_net.node_marker = 0; instance_net.bxl = db.xh; instance_net.bxh = db.xl; for (int j = 0; j < MAX_K; ++j) { instance_net.pin_offset_x[j] = 0; } } } #ifdef DYNAMIC #define CEILDIV CUDACeilDiv #else #define CEILDIV CPUCeilDiv #endif template <typename T> #ifdef DYNAMIC __global__ void k_reorder(DetailedPlaceDB<T> db, KReorderState<T> state) #else void k_reorder(DetailedPlaceDB<T>& db, KReorderState<T>& state, const std::vector<std::vector<KReorderInstance> >& host_reorder_instances) #endif { #ifdef TIMER hr_clock_rep 
timer_start, timer_stop; hr_clock_rep enumeration_time = 0, apply_reorder_time = 0; int enumeration_runs = 0, apply_reorder_runs = 0; #endif for (int group_id = 0; group_id < state.reorder_instances.size1; ++group_id) { #ifdef DYNAMIC int group_size = state.reorder_instances.size(group_id); #else dreamplaceAssert(state.reorder_instances.size1 == host_reorder_instances.size()); int group_size = host_reorder_instances[group_id].size(); #endif if (group_size) { for (int offset = 0; offset < state.K; offset += state.K/2) { #ifdef TIMER timer_start = get_globaltime(); #endif reset_state<<<64, 512>>>(db, state, group_id); compute_node2inst_map<<<CEILDIV(group_size, 256), 256>>>(db, state, group_id, offset); compute_net_markers<<<CEILDIV(db.num_movable_nodes, 256), 256>>>(db, state); //print_net_markers<<<1, 1>>>(db, state); #ifdef DETERMINISTIC compute_instance_nets<<<CEILDIV(group_size, 256), 256>>>(db, state, group_id, offset); #else compute_instance_nets<<<CEILDIV(db.num_nets, 256), 256>>>(db, state); #endif //print_instance_nets<<<1, 1>>>(state, group_id); unique_instance_nets<<<CEILDIV(group_size, 256), 256>>>(db, state, group_id); //print_instance_nets<<<1, 1>>>(state, group_id, offset); //check_instance_nets<<<1, 1>>>(db, state, group_id); compute_instance_net_boxes<<<CEILDIV(group_size, 256), 256>>>(db, state, group_id, offset); //print_instance_net_bboxes<<<1, 1>>>(state, group_id, offset); compute_reorder_hpwl<<<CEILDIV(group_size, 256), 256>>>(db, state, group_id, offset); #ifdef TIMER checkCUDA(cudaDeviceSynchronize()); timer_stop = get_globaltime(); enumeration_time += timer_stop-timer_start; enumeration_runs += 1; #endif #ifdef TIMER timer_start = get_globaltime(); #endif //print_costs<<<1, 1>>>(state, group_id, offset); reduce_min_2d_cub<T, 32><<<group_size, 32>>>(state.costs, state.best_permute_id, group_size, state.num_permutations); //print_best_permute_id<<<1, 1>>>(state, group_id, offset); apply_reorder<<<CEILDIV(group_size, 256), 256>>>(db, state, group_id, offset); #ifdef TIMER checkCUDA(cudaDeviceSynchronize()); timer_stop = get_globaltime(); apply_reorder_time += timer_stop-timer_start; apply_reorder_runs += 1; #endif //print_pos<<<1, 1>>>(db, group_id, offset); } } } #ifdef TIMER dreamplacePrint(kDEBUG, "enumeration takes %g ms for %d runs, average %g ms\n", get_timer_period()*enumeration_time, enumeration_runs, get_timer_period()*enumeration_time/enumeration_runs); dreamplacePrint(kDEBUG, "apply_reorder takes %g ms for %d runs, average %g ms\n", get_timer_period()*apply_reorder_time, apply_reorder_runs, get_timer_period()*apply_reorder_time/apply_reorder_runs); #endif } template <typename T> int kreorderCUDALauncher(DetailedPlaceDB<T> db, int K, int max_iters, int num_threads) { dreamplacePrint(kDEBUG, "%d-reorder\n", K); hr_clock_rep total_time_start, total_time_stop; hr_clock_rep kernel_time_start, kernel_time_stop; hr_clock_rep iter_time_start, iter_time_stop; total_time_start = get_globaltime(); const float stop_threshold = 0.1/100; // fix random seed std::srand(1000); KReorderState<T> state; DetailedPlaceDB<T> cpu_db; state.K = K; // distribute cells to rows on host // copy cell locations from device to host std::vector<std::vector<int> > host_row2node_map (db.num_sites_y); std::vector<T> host_node_space_x (db.num_movable_nodes); std::vector<std::vector<int> > host_permutations = quick_perm(K); std::vector<unsigned char> host_adjacency_matrix; std::vector<std::vector<int> > host_row_graph; std::vector<std::vector<int> > host_independent_rows; 
std::vector<std::vector<KReorderInstance> > host_reorder_instances; dreamplacePrint(kDEBUG, "%lu permutations\n", host_permutations.size()); // initialize cpu db from db { iter_time_start = get_globaltime(); cpu_db.xl = db.xl; cpu_db.yl = db.yl; cpu_db.xh = db.xh; cpu_db.yh = db.yh; cpu_db.site_width = db.site_width; cpu_db.row_height = db.row_height; cpu_db.bin_size_x = db.bin_size_x; cpu_db.bin_size_y = db.bin_size_y; cpu_db.num_bins_x = db.num_bins_x; cpu_db.num_bins_y = db.num_bins_y; cpu_db.num_sites_x = db.num_sites_x; cpu_db.num_sites_y = db.num_sites_y; cpu_db.num_nodes = db.num_nodes; cpu_db.num_movable_nodes = db.num_movable_nodes; cpu_db.num_nets = db.num_nets; cpu_db.num_pins = db.num_pins; allocateCopyCPU(cpu_db.net_mask, db.net_mask, db.num_nets, unsigned char); allocateCopyCPU(cpu_db.flat_net2pin_start_map, db.flat_net2pin_start_map, db.num_nets+1, int); allocateCopyCPU(cpu_db.flat_net2pin_map, db.flat_net2pin_map, db.num_pins, int); allocateCopyCPU(cpu_db.pin2node_map, db.pin2node_map, db.num_pins, int); allocateCopyCPU(cpu_db.x, db.x, db.num_nodes, T); allocateCopyCPU(cpu_db.y, db.y, db.num_nodes, T); allocateCopyCPU(cpu_db.node_size_x, db.node_size_x, db.num_nodes, T); allocateCopyCPU(cpu_db.node_size_y, db.node_size_y, db.num_nodes, T); make_row2node_map(cpu_db, cpu_db.x, cpu_db.y, host_row2node_map, num_threads); host_node_space_x.resize(cpu_db.num_movable_nodes); for (int i = 0; i < cpu_db.num_sites_y; ++i) { for (unsigned int j = 0; j < host_row2node_map.at(i).size(); ++j) { int node_id = host_row2node_map[i][j]; if (node_id < db.num_movable_nodes) { auto& space = host_node_space_x[node_id]; T space_xl = cpu_db.x[node_id]; T space_xh = cpu_db.xh; if (j+1 < host_row2node_map[i].size()) { int right_node_id = host_row2node_map[i][j+1]; space_xh = min(space_xh, cpu_db.x[right_node_id]); } space = space_xh-space_xl; // align space to sites, as I assume space_xl aligns to sites // I also assume node width should be integral numbers of sites space = floor(space / db.site_width) * db.site_width; T node_size_x = cpu_db.node_size_x[node_id]; dreamplaceAssertMsg(space >= node_size_x, "space %g, node_size_x[%d] %g, original space (%g, %g), site_width %g", space, node_id, node_size_x, space_xl, space_xh, db.site_width); } #ifdef DEBUG if (node_id < cpu_db.num_movable_nodes) { dreamplaceAssert(space >= cpu_db.node_size_x[node_id]); } #endif } } iter_time_stop = get_globaltime(); dreamplacePrint(kDEBUG, "initializing CPU DB takes %g ms\n", get_timer_period()*(iter_time_stop-iter_time_start)); iter_time_start = get_globaltime(); compute_row_conflict_graph(cpu_db, host_row2node_map, host_adjacency_matrix, host_row_graph, num_threads); compute_independent_rows(cpu_db, host_row_graph, host_independent_rows); iter_time_stop = get_globaltime(); dreamplacePrint(kDEBUG, "computing independent rows takes %g ms\n", get_timer_period()*(iter_time_stop-iter_time_start)); iter_time_start = get_globaltime(); compute_reorder_instances(cpu_db, host_row2node_map, host_independent_rows, host_reorder_instances, state.K); iter_time_stop = get_globaltime(); dreamplacePrint(kDEBUG, "computing reorder instances takes %g ms\n", get_timer_period()*(iter_time_stop-iter_time_start)); } // initialize cuda state iter_time_start = get_globaltime(); { allocateCopyCUDA(state.node_space_x, host_node_space_x.data(), db.num_movable_nodes); std::vector<int> host_permutations_flat (host_permutations.size()*K); for (unsigned int i = 0; i < host_permutations.size(); ++i) { std::copy(host_permutations[i].begin(), 
host_permutations[i].end(), host_permutations_flat.begin() + i*K); } state.num_permutations = host_permutations.size(); allocateCopyCUDA(state.permutations, host_permutations_flat.data(), state.num_permutations*state.K); state.row2node_map.initialize(host_row2node_map); state.reorder_instances.initialize(host_reorder_instances); allocateCUDA(state.costs, state.reorder_instances.size2*state.num_permutations, T); allocateCUDA(state.best_permute_id, state.reorder_instances.size2, int); allocateCUDA(state.instance_nets, state.reorder_instances.size2*MAX_NUM_NETS_PER_INSTANCE, InstanceNet<T>); allocateCUDA(state.instance_nets_size, state.reorder_instances.size2, int); allocateCUDA(state.node2inst_map, db.num_nodes, int); allocateCUDA(state.net_markers, db.num_nets, int); allocateCUDA(state.node_markers, db.num_nodes, unsigned char); allocateCUDA(state.device_num_moved, 1, int); allocateCUDA(state.net_hpwls, db.num_nets, long); } iter_time_stop = get_globaltime(); dreamplacePrint(kDEBUG, "initializing CUDA state takes %g ms\n", get_timer_period()*(iter_time_stop-iter_time_start)); kernel_time_start = get_globaltime(); double hpwls [max_iters+1]; hpwls[0] = compute_total_hpwl(db, db.x, db.y, state.net_hpwls); dreamplacePrint(kINFO, "initial hpwl = %.3f\n", hpwls[0]); for (int iter = 0; iter < max_iters; ++iter) { iter_time_start = get_globaltime(); k_reorder #ifdef DYNAMIC <<<1, 1>>>(db, state); #else (db, state, host_reorder_instances); #endif checkCUDA(cudaDeviceSynchronize()); iter_time_stop = get_globaltime(); dreamplacePrint(kINFO, "Iteration time(ms) \t %g\n", get_timer_period() * (iter_time_stop - iter_time_start)); hpwls[iter+1] = compute_total_hpwl(db, db.x, db.y, state.net_hpwls); dreamplacePrint(kINFO, "iteration %d: hpwl %.3f => %.3f (imp. %g%%)\n", iter, hpwls[0], hpwls[iter+1], (1.0-hpwls[iter+1]/(double)hpwls[0])*100); if ((iter&1) && hpwls[iter]-hpwls[iter-1] > -stop_threshold*hpwls[0]) { break; } } checkCUDA(cudaDeviceSynchronize()); kernel_time_stop = get_globaltime(); // destroy cuda state iter_time_start = get_globaltime(); { destroyCUDA(state.node_space_x); destroyCUDA(state.permutations); state.row2node_map.destroy(); state.reorder_instances.destroy(); destroyCUDA(state.costs); destroyCUDA(state.best_permute_id); destroyCUDA(state.instance_nets); destroyCUDA(state.instance_nets_size); destroyCUDA(state.node2inst_map); destroyCUDA(state.net_markers); destroyCUDA(state.node_markers); destroyCUDA(state.device_num_moved); destroyCUDA(state.net_hpwls); } iter_time_stop = get_globaltime(); dreamplacePrint(kDEBUG, "destroying CUDA state takes %g ms\n", get_timer_period()*(iter_time_stop-iter_time_start)); total_time_stop = get_globaltime(); // destroy cpu db iter_time_start = get_globaltime(); { destroyCPU(cpu_db.net_mask); destroyCPU(cpu_db.flat_net2pin_start_map); destroyCPU(cpu_db.flat_net2pin_map); destroyCPU(cpu_db.pin2node_map); destroyCPU(cpu_db.x); destroyCPU(cpu_db.y); destroyCPU(cpu_db.node_size_x); destroyCPU(cpu_db.node_size_y); } iter_time_stop = get_globaltime(); dreamplacePrint(kDEBUG, "destroying CPU state takes %g ms\n", get_timer_period()*(iter_time_stop-iter_time_start)); dreamplacePrint(kINFO, "Kernel time: %g ms\n", get_timer_period()*(kernel_time_stop-kernel_time_start)); dreamplacePrint(kINFO, "K-reorder time: %g ms\n", get_timer_period()*(total_time_stop-total_time_start)); return 0; } #define REGISTER_KERNEL_LAUNCHER(T) \ void instantiateKReorderCUDALauncher(\ DetailedPlaceDB<T> db, \ int K, \ int max_iters, \ int num_threads \ )\ {\ 
kreorderCUDALauncher<T>(\ db, \ K, \ max_iters, \ num_threads \ );\ } REGISTER_KERNEL_LAUNCHER(int); REGISTER_KERNEL_LAUNCHER(float); REGISTER_KERNEL_LAUNCHER(double); DREAMPLACE_END_NAMESPACE
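The k_reorder pipeline above fills a row-major costs array with one HPWL value per (instance, permutation) pair and then calls reduce_min_2d_cub<T, 32> to pick the cheapest permutation for every instance. That CUB-based kernel is defined elsewhere in the repository; the standalone sketch below is only an illustration of the per-row argmin it has to perform (hypothetical kernel name, float costs, one 32-thread block per instance).

#include <cfloat>

// Per-row argmin: one block per reorder instance; 32 threads stride over that
// instance's permutation costs and keep the index of the smallest one.
__global__ void argmin_per_row(const float* costs, int* best_permute_id,
                               int num_rows, int num_cols)
{
    __shared__ float sval[32];
    __shared__ int   sidx[32];

    int row = blockIdx.x;
    if (row >= num_rows) return;

    float best_val = FLT_MAX;
    int   best_idx = 0;
    for (int col = threadIdx.x; col < num_cols; col += blockDim.x)
    {
        float v = costs[row * num_cols + col];
        if (v < best_val) { best_val = v; best_idx = col; }
    }
    sval[threadIdx.x] = best_val;
    sidx[threadIdx.x] = best_idx;
    __syncthreads();

    // tree reduction over the 32 partial minima
    for (int s = blockDim.x >> 1; s > 0; s >>= 1)
    {
        if (threadIdx.x < s && sval[threadIdx.x + s] < sval[threadIdx.x])
        {
            sval[threadIdx.x] = sval[threadIdx.x + s];
            sidx[threadIdx.x] = sidx[threadIdx.x + s];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) best_permute_id[row] = sidx[0];
}
// launched as: argmin_per_row<<<group_size, 32>>>(costs, best_permute_id, group_size, num_permutations);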
269a95f2bca18a610ea10d469bb11e1a87963e4a.hip
// !!! This is a file automatically generated by hipify!!! /* Compute array sum using MPI, OpenMP and CUDA*/ #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <math.h> #include <stdio.h> #include <mpi.h> #include <omp.h> #include <stdlib.h> #include <unistd.h> #define TOTALN 120120 #define BLOCKS_PerGrid 32 #define THREADS_PerBlock 64 __global__ void SumArray(float *c, float *a,int m) { __shared__ float mycache[THREADS_PerBlock]; int i = threadIdx.x+blockIdx.x*blockDim.x; int j = gridDim.x*blockDim.x; int cacheN; float sum; int k; sum=0; cacheN=threadIdx.x; while(i<m) { sum += a[i]; i = i+j; } mycache[cacheN]=sum; __syncthreads(); k=THREADS_PerBlock>>1; while(k) { if(cacheN<k) { mycache[cacheN] += mycache[cacheN+k]; } __syncthreads(); k=k>>1; } if(cacheN==0) { c[blockIdx.x]=mycache[0]; } } int main(int argc, char* argv[]) { int pid, np, elements_per_process, element_per_GPU; float local_sum = 0; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &pid); MPI_Comm_size(MPI_COMM_WORLD, &np); elements_per_process = TOTALN / np; float a[TOTALN] ; int j; srand48(1<<12); for(j=0;j<TOTALN;j++) { a[j]=(float) drand48(); } //CPU version float sum_serial; sum_serial=0; for(j=0;j<TOTALN;j++){ sum_serial += a[j]; } int N = 16; // 16 CPUs and 2GPUs float sum_per_thread[N]; for(int i = 0; i < N; i++){sum_per_thread[i] =0;} int M =2; float c[M*BLOCKS_PerGrid] ; element_per_GPU = elements_per_process/(M+1); #pragma omp parallel num_threads(N) { int tid = omp_get_thread_num(); if (tid < M) { /* For GPU */ int GPU_index = (pid*elements_per_process)+tid * element_per_GPU; float *dev_a = 0; float *dev_c = 0; hipMalloc((void**)&dev_c, BLOCKS_PerGrid * sizeof(float)); hipMalloc((void**)&dev_a, element_per_GPU * sizeof(float)); hipMemcpy(dev_a, &a[GPU_index], element_per_GPU * sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( SumArray), dim3(BLOCKS_PerGrid), dim3(THREADS_PerBlock), 0, 0, dev_c, dev_a, element_per_GPU); hipDeviceSynchronize(); hipMemcpy(&c[tid*BLOCKS_PerGrid], dev_c, BLOCKS_PerGrid * sizeof(float), hipMemcpyDeviceToHost); hipFree(dev_c); hipFree(dev_a); for(int j1=tid*BLOCKS_PerGrid;j1<(tid+1)*BLOCKS_PerGrid;j1++){ sum_per_thread[tid] += c[j1]; } } else if(tid>=M&&tid !=N-1){ int Nt = (elements_per_process-M*element_per_GPU)/(N-M); int i_start = (pid*elements_per_process)+M*element_per_GPU+(tid-M)*Nt; int i_end = (pid*elements_per_process)+M*element_per_GPU+((tid-M)+1)*Nt; for (int i = i_start; i<i_end; i++){ sum_per_thread[tid]+=a[i]; } } else if (tid==N-1){ int Nt = (elements_per_process-M*element_per_GPU)-(N-M-1)*((elements_per_process-M*element_per_GPU)/(N-M)); int i_start = (pid+1)*elements_per_process-Nt; int i_end = (pid+1)*elements_per_process; for (int i = i_start; i<i_end; i++){ sum_per_thread[tid]+=a[i]; } } } #pragma omp barrier for(int i = 0; i < N; i++){local_sum +=sum_per_thread[i];} MPI_Barrier(MPI_COMM_WORLD); float global_sum =0; MPI_Allreduce ( &local_sum, &global_sum, 1,MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD ); MPI_Finalize(); printf("local_sum=%f; global_sum=%f; sum_serial=%f\n",local_sum,global_sum,sum_serial); return 0; }
269a95f2bca18a610ea10d469bb11e1a87963e4a.cu
/* Compute array sum using MPI, OpenMP and CUDA*/ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <math.h> #include <stdio.h> #include <mpi.h> #include <omp.h> #include <stdlib.h> #include <unistd.h> #define TOTALN 120120 #define BLOCKS_PerGrid 32 #define THREADS_PerBlock 64 __global__ void SumArray(float *c, float *a,int m) { __shared__ float mycache[THREADS_PerBlock]; int i = threadIdx.x+blockIdx.x*blockDim.x; int j = gridDim.x*blockDim.x; int cacheN; float sum; int k; sum=0; cacheN=threadIdx.x; while(i<m) { sum += a[i]; i = i+j; } mycache[cacheN]=sum; __syncthreads(); k=THREADS_PerBlock>>1; while(k) { if(cacheN<k) { mycache[cacheN] += mycache[cacheN+k]; } __syncthreads(); k=k>>1; } if(cacheN==0) { c[blockIdx.x]=mycache[0]; } } int main(int argc, char* argv[]) { int pid, np, elements_per_process, element_per_GPU; float local_sum = 0; MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &pid); MPI_Comm_size(MPI_COMM_WORLD, &np); elements_per_process = TOTALN / np; float a[TOTALN] ; int j; srand48(1<<12); for(j=0;j<TOTALN;j++) { a[j]=(float) drand48(); } //CPU version float sum_serial; sum_serial=0; for(j=0;j<TOTALN;j++){ sum_serial += a[j]; } int N = 16; // 16 CPUs and 2GPUs float sum_per_thread[N]; for(int i = 0; i < N; i++){sum_per_thread[i] =0;} int M =2; float c[M*BLOCKS_PerGrid] ; element_per_GPU = elements_per_process/(M+1); #pragma omp parallel num_threads(N) { int tid = omp_get_thread_num(); if (tid < M) { /* For GPU */ int GPU_index = (pid*elements_per_process)+tid * element_per_GPU; float *dev_a = 0; float *dev_c = 0; cudaMalloc((void**)&dev_c, BLOCKS_PerGrid * sizeof(float)); cudaMalloc((void**)&dev_a, element_per_GPU * sizeof(float)); cudaMemcpy(dev_a, &a[GPU_index], element_per_GPU * sizeof(float), cudaMemcpyHostToDevice); SumArray<<<BLOCKS_PerGrid, THREADS_PerBlock>>>(dev_c, dev_a, element_per_GPU); cudaDeviceSynchronize(); cudaMemcpy(&c[tid*BLOCKS_PerGrid], dev_c, BLOCKS_PerGrid * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(dev_c); cudaFree(dev_a); for(int j1=tid*BLOCKS_PerGrid;j1<(tid+1)*BLOCKS_PerGrid;j1++){ sum_per_thread[tid] += c[j1]; } } else if(tid>=M&&tid !=N-1){ int Nt = (elements_per_process-M*element_per_GPU)/(N-M); int i_start = (pid*elements_per_process)+M*element_per_GPU+(tid-M)*Nt; int i_end = (pid*elements_per_process)+M*element_per_GPU+((tid-M)+1)*Nt; for (int i = i_start; i<i_end; i++){ sum_per_thread[tid]+=a[i]; } } else if (tid==N-1){ int Nt = (elements_per_process-M*element_per_GPU)-(N-M-1)*((elements_per_process-M*element_per_GPU)/(N-M)); int i_start = (pid+1)*elements_per_process-Nt; int i_end = (pid+1)*elements_per_process; for (int i = i_start; i<i_end; i++){ sum_per_thread[tid]+=a[i]; } } } #pragma omp barrier for(int i = 0; i < N; i++){local_sum +=sum_per_thread[i];} MPI_Barrier(MPI_COMM_WORLD); float global_sum =0; MPI_Allreduce ( &local_sum, &global_sum, 1,MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD ); MPI_Finalize(); printf("local_sum=%f; global_sum=%f; sum_serial=%f\n",local_sum,global_sum,sum_serial); return 0; }
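Both versions of the file above split the data the same way: each MPI rank owns TOTALN/np elements, the first M = 2 OpenMP threads each push a third of that slice to a GPU, and the remaining N - M threads sum the rest on the CPU, with the last thread absorbing any remainder. The host-only snippet below is an illustrative sanity check of that arithmetic (variable names are mine, not from the file); with 2 ranks it reproduces the 60060 / 20020 / 1430 split.

#include <cstdio>

int main()
{
    const int total_n  = 120120;  // TOTALN in the file above
    const int np       = 2;       // example MPI world size
    const int n_threads = 16;     // N: OpenMP threads per rank
    const int m_gpus   = 2;       // M: threads that drive a GPU

    int per_rank = total_n / np;                     // 60060 elements per rank
    int per_gpu  = per_rank / (m_gpus + 1);          // 20020 per GPU thread
    int cpu_part = per_rank - m_gpus * per_gpu;      // 20020 left for the CPU threads
    int per_cpu  = cpu_part / (n_threads - m_gpus);  // 1430 per CPU thread
    int last_cpu = cpu_part - (n_threads - m_gpus - 1) * per_cpu;  // remainder thread

    printf("per_rank=%d per_gpu=%d per_cpu=%d last_cpu=%d\n",
           per_rank, per_gpu, per_cpu, last_cpu);    // 60060 20020 1430 1430
    return 0;
}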
cc3d16c276697f733f38579ab2b93329520f0bc3.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
#include <iostream>

namespace StreamCompaction {
    namespace Efficient {
        using StreamCompaction::Common::PerformanceTimer;
        PerformanceTimer& timer()
        {
            static PerformanceTimer timer;
            return timer;
        }

        __global__ void kernUpSweep(int N, int *odata, int d){
            int index = threadIdx.x + (blockIdx.x * blockDim.x);
            // blocksize is changing
            if (index < (N >> (d + 1)) ){
                int idx = index << (d + 1);
                odata[idx + (1 << (d + 1)) - 1] += odata[idx + (1 << d) - 1];
            }
        }

        __global__ void kernDownSweep(int N, int *odata, int d){
            int index = threadIdx.x + (blockIdx.x * blockDim.x);
            if (index < (N >> (d + 1)) ) {
                int idx = index << (d + 1);
                int tmp = odata[idx + (1 << d) - 1];
                odata[idx + (1 << d) - 1] = odata[idx + (1 << (d + 1)) - 1];
                odata[idx + (1 << (d + 1)) - 1] += tmp;
            }
        }

        /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        void scan(int n, int *odata, const int *idata) {
            // when n is not power of two, need to allocate more space to zero pad
            int d = ilog2ceil(n);
            int N = 1 << d;
            int timer_started = 0;
            dim3 fullBlockPerGrid;
            int* dev_out;

            hipMalloc((void**)&dev_out, sizeof(int) * N);
            checkCUDAError("hipMalloc dev_out failed");
            hipMemset(dev_out, 0, sizeof(int) * N);
            checkCUDAError("cuda Memset failed");
            hipMemcpy(dev_out, idata, sizeof(int) * n, hipMemcpyHostToDevice);
            checkCUDAError("hipMemcpyHostToDevice failed");

            try {
                timer().startGpuTimer();
            }
            catch(...) {
                // timer already started
                timer_started = 1;
            }

            // without shared memory, the algorithm needs to be called for d times
            for (int i = 0; i < d; i++){
                fullBlockPerGrid = ((1 << (d - i - 1)) + blockSize - 1) / blockSize;
                hipLaunchKernelGGL(( kernUpSweep), dim3(fullBlockPerGrid), dim3(blockSize), 0, 0, N, dev_out, i);
                checkCUDAError("kernUpSweep failed");
            }

            hipMemset(dev_out + N - 1, 0, sizeof(int));
            for (int i = d - 1; i >= 0; i--){
                fullBlockPerGrid = ((1 << (d - i - 1)) + blockSize - 1) / blockSize;
                hipLaunchKernelGGL(( kernDownSweep), dim3(fullBlockPerGrid), dim3(blockSize), 0, 0, N, dev_out, i);
                checkCUDAError("kernDownpSweep failed");
            }

            if (!timer_started)
                timer().endGpuTimer();

            hipMemcpy(odata, dev_out, sizeof(int) * n, hipMemcpyDeviceToHost);
            checkCUDAError("hipMemcpyDeviceToHost failed");
            hipFree(dev_out);
        }

        /**
         * Performs stream compaction on idata, storing the result into odata.
         * All zeroes are discarded.
         *
         * @param n      The number of elements in idata.
         * @param odata  The array into which to store elements.
         * @param idata  The array of elements to compact.
         * @returns      The number of elements remaining after compaction.
         */
        int compact(int n, int *odata, const int *idata) {
            dim3 fullBlockPerGrid((n + blockSize - 1) / blockSize);
            int* bools, *indices, *dev_in, *dev_out;
            int num_element;

            hipMalloc((void**)&bools, sizeof(int) * n);
            checkCUDAError("hipMalloc bools failed");
            hipMalloc((void**)&indices, sizeof(int) * n);
            checkCUDAError("hipMalloc indices failed");
            hipMalloc((void**)&dev_out, sizeof(int) * n);
            checkCUDAError("hipMalloc dev_out failed");
            hipMalloc((void**)&dev_in, sizeof(int) * n);
            checkCUDAError("hipMalloc dev_in failed");

            // lots of memcpy...
            hipMemcpy(dev_in, idata, sizeof(int) * n, hipMemcpyHostToDevice);
            checkCUDAError("hipMemcpyHostToDevice failed");

            timer().startGpuTimer();
            hipLaunchKernelGGL((StreamCompaction::Common::kernMapToBoolean), dim3(fullBlockPerGrid), dim3(blockSize), 0, 0, n, bools, dev_in);
            checkCUDAError("kernMapToBoolean failed");

            hipMemcpy(odata, bools, sizeof(int) * n, hipMemcpyDeviceToHost);
            num_element = odata[n - 1];
            checkCUDAError("hipMemcpyDeviceToHost failed");

            scan(n, odata, odata);
            num_element += odata[n - 1];

            hipMemcpy(indices, odata, sizeof(int) * n, hipMemcpyHostToDevice);
            checkCUDAError("hipMemcpyHostToDevice failed");

            hipLaunchKernelGGL(( StreamCompaction::Common::kernScatter), dim3(fullBlockPerGrid), dim3(blockSize), 0, 0, n, dev_out, dev_in, bools, indices);
            timer().endGpuTimer();

            hipMemcpy(odata, dev_out, sizeof(int) * n, hipMemcpyDeviceToHost);
            checkCUDAError("hipMemcpyDeviceToHost failed");

            hipFree(bools);
            hipFree(indices);
            hipFree(dev_in);
            hipFree(dev_out);

            return num_element;
        }
    }
}
cc3d16c276697f733f38579ab2b93329520f0bc3.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
#include <iostream>

namespace StreamCompaction {
    namespace Efficient {
        using StreamCompaction::Common::PerformanceTimer;
        PerformanceTimer& timer()
        {
            static PerformanceTimer timer;
            return timer;
        }

        __global__ void kernUpSweep(int N, int *odata, int d){
            int index = threadIdx.x + (blockIdx.x * blockDim.x);
            // blocksize is changing
            if (index < (N >> (d + 1)) ){
                int idx = index << (d + 1);
                odata[idx + (1 << (d + 1)) - 1] += odata[idx + (1 << d) - 1];
            }
        }

        __global__ void kernDownSweep(int N, int *odata, int d){
            int index = threadIdx.x + (blockIdx.x * blockDim.x);
            if (index < (N >> (d + 1)) ) {
                int idx = index << (d + 1);
                int tmp = odata[idx + (1 << d) - 1];
                odata[idx + (1 << d) - 1] = odata[idx + (1 << (d + 1)) - 1];
                odata[idx + (1 << (d + 1)) - 1] += tmp;
            }
        }

        /**
         * Performs prefix-sum (aka scan) on idata, storing the result into odata.
         */
        void scan(int n, int *odata, const int *idata) {
            // when n is not power of two, need to allocate more space to zero pad
            int d = ilog2ceil(n);
            int N = 1 << d;
            int timer_started = 0;
            dim3 fullBlockPerGrid;
            int* dev_out;

            cudaMalloc((void**)&dev_out, sizeof(int) * N);
            checkCUDAError("cudaMalloc dev_out failed");
            cudaMemset(dev_out, 0, sizeof(int) * N);
            checkCUDAError("cuda Memset failed");
            cudaMemcpy(dev_out, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
            checkCUDAError("cudaMemcpyHostToDevice failed");

            try {
                timer().startGpuTimer();
            }
            catch(...) {
                // timer already started
                timer_started = 1;
            }

            // without shared memory, the algorithm needs to be called for d times
            for (int i = 0; i < d; i++){
                fullBlockPerGrid = ((1 << (d - i - 1)) + blockSize - 1) / blockSize;
                kernUpSweep<<<fullBlockPerGrid, blockSize>>>(N, dev_out, i);
                checkCUDAError("kernUpSweep failed");
            }

            cudaMemset(dev_out + N - 1, 0, sizeof(int));
            for (int i = d - 1; i >= 0; i--){
                fullBlockPerGrid = ((1 << (d - i - 1)) + blockSize - 1) / blockSize;
                kernDownSweep<<<fullBlockPerGrid, blockSize>>>(N, dev_out, i);
                checkCUDAError("kernDownpSweep failed");
            }

            if (!timer_started)
                timer().endGpuTimer();

            cudaMemcpy(odata, dev_out, sizeof(int) * n, cudaMemcpyDeviceToHost);
            checkCUDAError("cudaMemcpyDeviceToHost failed");
            cudaFree(dev_out);
        }

        /**
         * Performs stream compaction on idata, storing the result into odata.
         * All zeroes are discarded.
         *
         * @param n      The number of elements in idata.
         * @param odata  The array into which to store elements.
         * @param idata  The array of elements to compact.
         * @returns      The number of elements remaining after compaction.
         */
        int compact(int n, int *odata, const int *idata) {
            dim3 fullBlockPerGrid((n + blockSize - 1) / blockSize);
            int* bools, *indices, *dev_in, *dev_out;
            int num_element;

            cudaMalloc((void**)&bools, sizeof(int) * n);
            checkCUDAError("cudaMalloc bools failed");
            cudaMalloc((void**)&indices, sizeof(int) * n);
            checkCUDAError("cudaMalloc indices failed");
            cudaMalloc((void**)&dev_out, sizeof(int) * n);
            checkCUDAError("cudaMalloc dev_out failed");
            cudaMalloc((void**)&dev_in, sizeof(int) * n);
            checkCUDAError("cudaMalloc dev_in failed");

            // lots of memcpy...
            cudaMemcpy(dev_in, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
            checkCUDAError("cudaMemcpyHostToDevice failed");

            timer().startGpuTimer();
            StreamCompaction::Common::kernMapToBoolean<<<fullBlockPerGrid, blockSize>>>(n, bools, dev_in);
            checkCUDAError("kernMapToBoolean failed");

            cudaMemcpy(odata, bools, sizeof(int) * n, cudaMemcpyDeviceToHost);
            num_element = odata[n - 1];
            checkCUDAError("cudaMemcpyDeviceToHost failed");

            scan(n, odata, odata);
            num_element += odata[n - 1];

            cudaMemcpy(indices, odata, sizeof(int) * n, cudaMemcpyHostToDevice);
            checkCUDAError("cudaMemcpyHostToDevice failed");

            StreamCompaction::Common::kernScatter<<<fullBlockPerGrid, blockSize>>>(n, dev_out, dev_in, bools, indices);
            timer().endGpuTimer();

            cudaMemcpy(odata, dev_out, sizeof(int) * n, cudaMemcpyDeviceToHost);
            checkCUDAError("cudaMemcpyDeviceToHost failed");

            cudaFree(bools);
            cudaFree(indices);
            cudaFree(dev_in);
            cudaFree(dev_out);

            return num_element;
        }
    }
}
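kernUpSweep and kernDownSweep above implement the Blelloch work-efficient exclusive scan, one kernel launch per level of the reduction tree. A host-only reference of the same two phases (illustrative helper, not part of the repository) is handy for checking the GPU result on a small power-of-two input.

#include <cstdio>

// CPU reference for the up-sweep / down-sweep exclusive scan (n must be a power of two here).
void blelloch_scan_cpu(int* a, int n) {
    // up-sweep: build partial sums in place
    for (int d = 1; d < n; d <<= 1)
        for (int k = 0; k < n; k += 2 * d)
            a[k + 2 * d - 1] += a[k + d - 1];
    // down-sweep: clear the root, then push prefixes back down the tree
    a[n - 1] = 0;
    for (int d = n >> 1; d >= 1; d >>= 1)
        for (int k = 0; k < n; k += 2 * d) {
            int t = a[k + d - 1];
            a[k + d - 1] = a[k + 2 * d - 1];
            a[k + 2 * d - 1] += t;
        }
}

int main() {
    int a[8] = {3, 1, 7, 0, 4, 1, 6, 3};
    blelloch_scan_cpu(a, 8);
    for (int i = 0; i < 8; ++i) printf("%d ", a[i]);  // prints: 0 3 4 11 11 15 16 22
    printf("\n");
    return 0;
}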
0c4befc4567364067f67208c0b6270f541a760bf.hip
// !!! This is a file automatically generated by hipify!!! /*================================================================================================== * pca_host.cu * * Edited by: William Halsey * [email protected] * * Justin Harrison * [email protected] * * THIS FILE CONTAINS * atomicFloatAdd * k_grayscale_normalize * k_project_image * k_project_image_collect * MatrixFindDistances * cudasafe * match_image * Recognition * * Description: * * Last edited: Nov. 11, 2013 * Edits: Justin - In function k_grayscale_normalize changed device_image[i].r to device_image[i].intensity for operating on grayscale image, not RGB. - In function k_project_image and k_project_image_collect changed variable name tid to thread_id William * */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include "ppm.h" #include "pca.h" #include <hip/device_functions.h> #include <hip/hip_runtime_api.h> /*================================================================================================== * atomicFloatAdd * * parameters * single pointer, type float = address * variable, type float = val * * returns * N/A * This function manipulates the value held in "address." * * Description: This function employs the built in CUDA function atomicCAS in order to atomically * add the variable "val" to the value contained at "address." * In this CUDA implementation of the PCA algorithm, this will allow only one thread at a time to * modify the value at "address." * * THIS FUNCTION CALLS * * THIS FUNCTION IS CALLED BY * MatrixFindDistances (pca_host.cu) * */ __device__ inline void atomicFloatAdd(float *address, float val) { int i_val = __float_as_int(val); int tmp0 = 0; int tmp1; while((tmp1 = atomicCAS((int *)address, tmp0, i_val)) != tmp0) { tmp0 = tmp1; i_val = __float_as_int(val + __int_as_float(tmp1)); } return; } /*================================================================================================== * k_grayscale_normalize * * parameters * single pointer, type Pixel = device_image * variable, type int = sizeH * variable, type int = sizeW * single pointer, type eigen_type = mean * single pointer, type eigen_type = test_image_norm * * returns * N/A * Implicitly returns a value through variable "test_image_norm." * * Description: This function takes the input image "device_image" and subtracts the average * image, "mean", and stores the resulting normalized image in "test_image_norm." * * THIS FUNCTION CALLS * * THIS FUNCTION IS CALLED BY * Recognition (pca_host.cu) * */ __global__ void k_grayscale_normalize(Pixel *device_image, int sizeH, int sizeW, eigen_type *mean, eigen_type *test_image_norm) { unsigned int x_idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y_idx = blockIdx.y; unsigned int i = y_idx + sizeH * x_idx; /* moved to shared memory */ if (x_idx < sizeW && y_idx < sizeH) { (test_image_norm)[i] = device_image[i].intensity - mean[i] + 1; } return; } /*================================================================================================== * k_project_image * * parameters * single pointer, type eigen_type = test_image_norm normalized version of test image * single pointer, type eigen_type = test_image_d2 output * variable, type int = img_size size of each image in pixels * variable, type int = num_faces number of distinct faces contained in the database * single pointer, type eigen_type = eigenfacesT some type of global location? * * returns * N/A * * Description: * This function multiplies the normalized test image with the eigenfaces. 
* sums each column of eigenfacesT and places the result in test_image_d2. The * resulting matrix has the result of each reduction step with the final result in column 0. * * THIS FUNCTION CALLS * * THIS FUNCTION IS CALLED BY * Recognition (pca_host.cu) */ __global__ void k_project_image(eigen_type *test_image_norm, eigen_type *test_image_d2, int img_size, int num_faces, eigen_type *eigenfacesT) { extern __shared__ eigen_type sdata[]; unsigned int x_idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y_idx = blockIdx.y; unsigned int s; /* iterators/reducers */ unsigned int thread_id = threadIdx.x; // unsigned int i = y_idx + sizeH * x_idx; /* moved to shared memory */ if (y_idx < num_faces && x_idx < img_size && thread_id < img_size) sdata[thread_id] = (eigenfacesT)[x_idx * (num_faces) + y_idx] * (test_image_norm)[x_idx]; else sdata[thread_id] = 0; // __syncthreads(); /* reduction code for adding up the columns and the placing the results in row 0 */ for (s = (blockDim.x) / 2; s > 0; s >>= 1) { if (thread_id < s) { sdata[thread_id] += sdata[thread_id + s]; } __syncthreads(); } /* write result for this block to global mem */ if (thread_id == 0) { test_image_d2[y_idx + blockIdx.x*num_faces] = sdata[thread_id]; } return; } /*================================================================================================== * k_project_image_collect * * parameters * single pointer, type eigen_type = test_image_d2 * variable, type int = image_size * variable, type int = width * variable, type int = final_width * * returns * N/A * * Description: * This function moves test_image_d2 into shared memory, then performs a column * reduction down to the first column. After its done in moves the result back * to global device memory in the first element of test_image_d2. 
* * THIS FUNCTION CALLS * * THIS FUNCTION IS CALLED BY * Recognition (pca_host.cu) */ __global__ void k_project_image_collect(eigen_type *test_image_d2, int image_size, int width, int final_width) { extern __shared__ eigen_type sdata[]; unsigned int x_idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y_idx = blockIdx.y; unsigned int s; /* iterators/reducers */ unsigned int thread_id = threadIdx.x; // unsigned int i = y_idx + sizeH * x_idx; /* moved to shared memory */ if(y_idx < image_size && x_idx < width && thread_id < width) sdata[thread_id] = test_image_d2[y_idx + x_idx * image_size]; else sdata[thread_id] = 0; // __syncthreads(); /* reduction code for adding up the columns and the placing the results in row 0 */ for (s=(blockDim.x) / 2; s > 0; s >>= 1) { if (thread_id < s) { sdata[thread_id] += sdata[thread_id + s]; } __syncthreads(); } /* write result for this block to global mem */ if (thread_id == 0) { test_image_d2[0] = sdata[0]; } return; } /*================================================================================================== * MatrixFindDistances * * parameters * single pointer, type eigen_type = matrix * single pointer, type eigen_type = vector * variable, type int = sizeW * variable, type int = sizeH * single pointer, type int = recognized_index * * returns * N/A * * Description: * * THIS FUNCTION CALLS * atomicFloatAdd (pca_host.cu) * * THIS FUNCTION IS CALLED BY * match_image (pca_host.cu) * */ __global__ void MatrixFindDistances(eigen_type *matrix, eigen_type *vector, int sizeW, int sizeH, int *recognized_index) { extern __shared__ eigen_type sdata[]; // __shared__ unsigned int min_index[256]; unsigned int x_idx = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y_idx = blockIdx.y; unsigned int s; /* iterators/reducers */ unsigned int tid = threadIdx.x; unsigned int i = y_idx + sizeH * x_idx; /* moved to shared memory */ if(x_idx < sizeW && y_idx < sizeH && tid < sizeW) sdata[tid] = matrix[i]; else sdata[tid] = 0; __syncthreads(); if(x_idx<sizeW && y_idx<sizeH) { sdata[tid] = (sdata[tid] - vector[x_idx])*(sdata[tid] - vector[x_idx]); /* square the difference */ } __syncthreads(); /* reduction code for adding up the columns and the placing the results in row 0 */ for (s=(blockDim.x)/2; s>0; s>>=1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } /* write result for this block to global mem */ if (tid == 0) { /* only 1 thread at a time can add (this is done at most 4 or so times, so parallelism isn't so important) */ atomicFloatAdd(&matrix[y_idx], sdata[0]); } return; } /*================================================================================================== * cudasafe * * parameters * variable, type hipError_t = error * single pointer, type char = message * * returns * N/A * * Description: This function is used to handle any errors that may occur with built in CUDA * function calls. The function first compares "error" with the CUDA defined constant * "hipSuccess." If they are equivalent nothing happens, but if they are not then "message" * and "error" are both printed to stderr and the program is terminated. 
* * THIS FUNCTION CALLS * * THIS FUNCTION IS CALLED BY * main (pca.cu) * Recognition (pca_host.cu) * */ void cudasafe(hipError_t error, char* message) { if(error != hipSuccess) { fprintf(stderr,"ERROR: %s : %i\n",message,error); exit(-1); } return; } /*================================================================================================== * match_image * * parameters * double pointer, type eigen_type = database * double pointer, type eigen_type = image * variable, type int = sizeW * variable, type int = sizeH * double pointer, type eigen_type = database_d * double pointer, type int = recognized_index_d * * returns * variable, type int * * Description: allocates correct dimension for grid, block, and memory form shared memory * * THIS FUNCTION CALLS * MatrixFindDistance (pca_host.cu) * * THIS FUNCTION IS CALLED BY * Recognition (pca_host.cu) */ int match_image(eigen_type **database, eigen_type **image, int sizeW, int sizeH, eigen_type **database_d, int **recognized_index_d) { int recognized_index_h = -1; int blocksize=16; /* making a row-optimized block size (it's really 256 columns long per block, but 1 row tall) */ dim3 dimBlock( blocksize * blocksize, 1); dim3 dimGrid( ceil(float(sizeW)/float(dimBlock.x)), ceil(float(sizeH)/float(dimBlock.y))); /* make sure to grab the size for the shared memory (shmsize) */ size_t shmsize = size_t(dimBlock.x * dimBlock.y * dimBlock.z) * sizeof(float); // size_t shmsize2 = size_t(dimBlock.x * dimBlock.y * dimBlock.z) * sizeof(int) + shmsize; /* put the database in memory */ // cudasafe(hipMemcpy((*database_d),(*database),sizeH * sizeW*sizeof(eigen_type),hipMemcpyHostToDevice), "Failed to copy host->device for image database!"); // cudasafe(hipMemcpy((*image_d),(*image),sizeH*sizeof(eigen_type),hipMemcpyHostToDevice), "Failed to copy host->device for test image!"); // <<<< configurations >>>> parameters for MatrixFindDistance hipLaunchKernelGGL(( MatrixFindDistances), dim3(dimGrid), dim3(dimBlock), shmsize, 0, *database_d,*image, sizeW, sizeH, *recognized_index_d); // hipblasIsamin - index of minimum magnitude element recognized_index_h = hipblasIsamin (sizeW, (*database_d), 1); if (hipPeekAtLastError() != hipSuccess) { printf("kernel launch error: %s\n", hipGetErrorString(hipGetLastError())); } hipDeviceSynchronize(); return(recognized_index_h); } /*================================================================================================== * Recognition * * paramters * single pointer, type char = inputimage * double pointer, type eigen_type = mean_d * double pointer, type eigen_type = projectedimages * double pointer, type eigen_type = eigenfacesT * double pointer, type eigen_type = projectedtrainimages * single pointer, type long int = images * single pointer, type long int = imgsize * single pointer, type long int = facessize * double pointer, type eigen_type = database_d * double pointer, type eigen_type = image_d * double pointer, type int = recognized_index_d * double pointer, type Pixel = test_image_d * double pointer, type eigen_type = test_image_d2 * double pointer, type eigen_type = test_image_norm * * returns * N/A * * Description: * * THIS FUNCTION CALLS * ppm_image_constructor (ppm.cu) * cudasafe (pca_host.cu) * k_grayscale_normalize (pca_host.cu) * k_project_image (pca_host.cu) * k_project_image_collect (pca_host.cu) * match_image (pca_host.cu) * ppm_image_destructor (ppm.cu) * * THIS FUNCTION IS CALLED BY * main (pca.cu) * */ void Recognition(char *inputimage, eigen_type **mean_d, eigen_type **projectedimages, eigen_type 
**eigenfacesT, eigen_type **projectedtrainimages, long int *images, long int *imgsize, long int *facessize, eigen_type **database_d, eigen_type **image_d, int **recognized_index_d, Pixel** test_image_d, eigen_type **test_image_d2, eigen_type **test_image_norm) { char outputtext[30]; int min_index; Pixel *junk = (Pixel *)malloc(sizeof(Pixel) * (100));; PPMImage *testimage; /* timing variables */ hipEvent_t start, stop; float elapsedTime; int blocksize=32; printf("%s\n", inputimage); hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); /* read in test image */ testimage = ppm_image_constructor(inputimage); // grayscale(testimage); /* making a row-optimized block size (it's really 256 columns long per block, but 1 row tall) */ dim3 dimBlock( blocksize * blocksize, 1); /* test images in current database are */ dim3 dimGrid( ceil(float(testimage->width)/float(dimBlock.x)), ceil(float(testimage->height)/float(dimBlock.y))); dim3 dimGrid2( ceil(float(*imgsize)/float(dimBlock.x)), ceil(float(*facessize)/float(dimBlock.y))); size_t shmsize = size_t(dimBlock.x * dimBlock.y * dimBlock.z) * sizeof(float); /* copy to device memory */ sprintf(outputtext, "Failed to copy host->device for test image1!"); cudasafe(hipMemcpy((*test_image_d),(testimage->pixels),sizeof(Pixel)*testimage->height*testimage->width,hipMemcpyHostToDevice), outputtext); // cudasafe(hipMemcpy((*test_image_d),(testimage->pixels),sizeof(Pixel)*testimage->height*testimage->width,hipMemcpyHostToDevice), "Failed to copy host->device for mean vector!"); hipLaunchKernelGGL(( k_grayscale_normalize), dim3(dimGrid), dim3(dimBlock), 0, 0, *test_image_d, testimage->height, testimage->width, *mean_d, *test_image_norm); // projectedtestimage = (eigen_type *)malloc(sizeof(eigen_type) * (*facessize)); // cudasafe(hipMemcpy((projectedtestimage),(*test_image_norm), sizeof(eigen_type)*20,hipMemcpyDeviceToHost), "Failed to copy device->host for pixels!"); hipLaunchKernelGGL(( k_project_image), dim3(dimGrid2), dim3(dimBlock), shmsize, 0, *test_image_norm,*test_image_d2, *imgsize, *facessize, *eigenfacesT); hipLaunchKernelGGL(( k_project_image_collect), dim3(dimGrid2), dim3(dimBlock), shmsize, 0, *test_image_d2, *facessize, (*imgsize/(blocksize*blocksize)), 0); // cudasafe(hipMemcpy((projectedtestimage),(*test_image_d2), sizeof(eigen_type)*((*facessize)),hipMemcpyDeviceToHost), "Failed to copy device->host for test image!"); // for(i = 0; i<10; i++) { // printf("projectedtestimage: %le\n", projectedtestimage[i]); // } /* project test image (ProjectedTestImage = eigenfacesT * NormalizedInputImage) */ /* perform the matching in cuda */ min_index = match_image(projectedtrainimages, test_image_d2, (*facessize), *images, database_d, recognized_index_d) - 1; hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime,start,stop); printf("\nOverall Speed:\t\t\t\t%lf (ms)\n", elapsedTime); sprintf(outputtext, "%s matches image index %d.ppm\n", inputimage, min_index + 1); printf("%s", outputtext); // free(projectedtestimage); // free(testimage_normalized); ppm_image_destructor(testimage, 1); return; }
0c4befc4567364067f67208c0b6270f541a760bf.cu
/*================================================================================================== * pca_host.cu * * Edited by: William Halsey * [email protected] * * Justin Harrison * [email protected] * * THIS FILE CONTAINS * atomicFloatAdd * k_grayscale_normalize * k_project_image * k_project_image_collect * MatrixFindDistances * cudasafe * match_image * Recognition * * Description: * * Last edited: Nov. 11, 2013 * Edits: Justin - In function k_grayscale_normalize changed device_image[i].r to device_image[i].intensity for operating on grayscale image, not RGB. - In function k_project_image and k_project_image_collect changed variable name tid to thread_id William * */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include "ppm.h" #include "pca.h" #include <device_functions.h> #include <cuda_runtime_api.h> /*================================================================================================== * atomicFloatAdd * * parameters * single pointer, type float = address * variable, type float = val * * returns * N/A * This function manipulates the value held in "address." * * Description: This function employs the built in CUDA function atomicCAS in order to atomically * add the variable "val" to the value contained at "address." * In this CUDA implementation of the PCA algorithm, this will allow only one thread at a time to * modify the value at "address." * * THIS FUNCTION CALLS * * THIS FUNCTION IS CALLED BY * MatrixFindDistances (pca_host.cu) * */ __device__ inline void atomicFloatAdd(float *address, float val) { int i_val = __float_as_int(val); int tmp0 = 0; int tmp1; while((tmp1 = atomicCAS((int *)address, tmp0, i_val)) != tmp0) { tmp0 = tmp1; i_val = __float_as_int(val + __int_as_float(tmp1)); } return; } /*================================================================================================== * k_grayscale_normalize * * parameters * single pointer, type Pixel = device_image * variable, type int = sizeH * variable, type int = sizeW * single pointer, type eigen_type = mean * single pointer, type eigen_type = test_image_norm * * returns * N/A * Implicitly returns a value through variable "test_image_norm." * * Description: This function takes the input image "device_image" and subtracts the average * image, "mean", and stores the resulting normalized image in "test_image_norm." * * THIS FUNCTION CALLS * * THIS FUNCTION IS CALLED BY * Recognition (pca_host.cu) * */ __global__ void k_grayscale_normalize(Pixel *device_image, int sizeH, int sizeW, eigen_type *mean, eigen_type *test_image_norm) { unsigned int x_idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y_idx = blockIdx.y; unsigned int i = y_idx + sizeH * x_idx; /* moved to shared memory */ if (x_idx < sizeW && y_idx < sizeH) { (test_image_norm)[i] = device_image[i].intensity - mean[i] + 1; } return; } /*================================================================================================== * k_project_image * * parameters * single pointer, type eigen_type = test_image_norm normalized version of test image * single pointer, type eigen_type = test_image_d2 output * variable, type int = img_size size of each image in pixels * variable, type int = num_faces number of distinct faces contained in the database * single pointer, type eigen_type = eigenfacesT some type of global location? * * returns * N/A * * Description: * This function multiplies the normalized test image with the eigenfaces. * sums each column of eigenfacesT and places the result in test_image_d2. 
The * resulting matrix has the result of each reduction step with the final result in column 0. * * THIS FUNCTION CALLS * * THIS FUNCTION IS CALLED BY * Recognition (pca_host.cu) */ __global__ void k_project_image(eigen_type *test_image_norm, eigen_type *test_image_d2, int img_size, int num_faces, eigen_type *eigenfacesT) { extern __shared__ eigen_type sdata[]; unsigned int x_idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y_idx = blockIdx.y; unsigned int s; /* iterators/reducers */ unsigned int thread_id = threadIdx.x; // unsigned int i = y_idx + sizeH * x_idx; /* moved to shared memory */ if (y_idx < num_faces && x_idx < img_size && thread_id < img_size) sdata[thread_id] = (eigenfacesT)[x_idx * (num_faces) + y_idx] * (test_image_norm)[x_idx]; else sdata[thread_id] = 0; // __syncthreads(); /* reduction code for adding up the columns and the placing the results in row 0 */ for (s = (blockDim.x) / 2; s > 0; s >>= 1) { if (thread_id < s) { sdata[thread_id] += sdata[thread_id + s]; } __syncthreads(); } /* write result for this block to global mem */ if (thread_id == 0) { test_image_d2[y_idx + blockIdx.x*num_faces] = sdata[thread_id]; } return; } /*================================================================================================== * k_project_image_collect * * parameters * single pointer, type eigen_type = test_image_d2 * variable, type int = image_size * variable, type int = width * variable, type int = final_width * * returns * N/A * * Description: * This function moves test_image_d2 into shared memory, then performs a column * reduction down to the first column. After its done in moves the result back * to global device memory in the first element of test_image_d2. * * THIS FUNCTION CALLS * * THIS FUNCTION IS CALLED BY * Recognition (pca_host.cu) */ __global__ void k_project_image_collect(eigen_type *test_image_d2, int image_size, int width, int final_width) { extern __shared__ eigen_type sdata[]; unsigned int x_idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y_idx = blockIdx.y; unsigned int s; /* iterators/reducers */ unsigned int thread_id = threadIdx.x; // unsigned int i = y_idx + sizeH * x_idx; /* moved to shared memory */ if(y_idx < image_size && x_idx < width && thread_id < width) sdata[thread_id] = test_image_d2[y_idx + x_idx * image_size]; else sdata[thread_id] = 0; // __syncthreads(); /* reduction code for adding up the columns and the placing the results in row 0 */ for (s=(blockDim.x) / 2; s > 0; s >>= 1) { if (thread_id < s) { sdata[thread_id] += sdata[thread_id + s]; } __syncthreads(); } /* write result for this block to global mem */ if (thread_id == 0) { test_image_d2[0] = sdata[0]; } return; } /*================================================================================================== * MatrixFindDistances * * parameters * single pointer, type eigen_type = matrix * single pointer, type eigen_type = vector * variable, type int = sizeW * variable, type int = sizeH * single pointer, type int = recognized_index * * returns * N/A * * Description: * * THIS FUNCTION CALLS * atomicFloatAdd (pca_host.cu) * * THIS FUNCTION IS CALLED BY * match_image (pca_host.cu) * */ __global__ void MatrixFindDistances(eigen_type *matrix, eigen_type *vector, int sizeW, int sizeH, int *recognized_index) { extern __shared__ eigen_type sdata[]; // __shared__ unsigned int min_index[256]; unsigned int x_idx = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y_idx = blockIdx.y; unsigned int s; /* iterators/reducers */ unsigned int tid = threadIdx.x; unsigned int 
i = y_idx + sizeH * x_idx; /* moved to shared memory */ if(x_idx < sizeW && y_idx < sizeH && tid < sizeW) sdata[tid] = matrix[i]; else sdata[tid] = 0; __syncthreads(); if(x_idx<sizeW && y_idx<sizeH) { sdata[tid] = (sdata[tid] - vector[x_idx])*(sdata[tid] - vector[x_idx]); /* square the difference */ } __syncthreads(); /* reduction code for adding up the columns and the placing the results in row 0 */ for (s=(blockDim.x)/2; s>0; s>>=1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } /* write result for this block to global mem */ if (tid == 0) { /* only 1 thread at a time can add (this is done at most 4 or so times, so parallelism isn't so important) */ atomicFloatAdd(&matrix[y_idx], sdata[0]); } return; } /*================================================================================================== * cudasafe * * parameters * variable, type cudaError_t = error * single pointer, type char = message * * returns * N/A * * Description: This function is used to handle any errors that may occur with built in CUDA * function calls. The function first compares "error" with the CUDA defined constant * "cudaSuccess." If they are equivalent nothing happens, but if they are not then "message" * and "error" are both printed to stderr and the program is terminated. * * THIS FUNCTION CALLS * * THIS FUNCTION IS CALLED BY * main (pca.cu) * Recognition (pca_host.cu) * */ void cudasafe(cudaError_t error, char* message) { if(error != cudaSuccess) { fprintf(stderr,"ERROR: %s : %i\n",message,error); exit(-1); } return; } /*================================================================================================== * match_image * * parameters * double pointer, type eigen_type = database * double pointer, type eigen_type = image * variable, type int = sizeW * variable, type int = sizeH * double pointer, type eigen_type = database_d * double pointer, type int = recognized_index_d * * returns * variable, type int * * Description: allocates correct dimension for grid, block, and memory form shared memory * * THIS FUNCTION CALLS * MatrixFindDistance (pca_host.cu) * * THIS FUNCTION IS CALLED BY * Recognition (pca_host.cu) */ int match_image(eigen_type **database, eigen_type **image, int sizeW, int sizeH, eigen_type **database_d, int **recognized_index_d) { int recognized_index_h = -1; int blocksize=16; /* making a row-optimized block size (it's really 256 columns long per block, but 1 row tall) */ dim3 dimBlock( blocksize * blocksize, 1); dim3 dimGrid( ceil(float(sizeW)/float(dimBlock.x)), ceil(float(sizeH)/float(dimBlock.y))); /* make sure to grab the size for the shared memory (shmsize) */ size_t shmsize = size_t(dimBlock.x * dimBlock.y * dimBlock.z) * sizeof(float); // size_t shmsize2 = size_t(dimBlock.x * dimBlock.y * dimBlock.z) * sizeof(int) + shmsize; /* put the database in memory */ // cudasafe(cudaMemcpy((*database_d),(*database),sizeH * sizeW*sizeof(eigen_type),cudaMemcpyHostToDevice), "Failed to copy host->device for image database!"); // cudasafe(cudaMemcpy((*image_d),(*image),sizeH*sizeof(eigen_type),cudaMemcpyHostToDevice), "Failed to copy host->device for test image!"); // <<<< configurations >>>> parameters for MatrixFindDistance MatrixFindDistances<<<dimGrid, dimBlock, shmsize>>>(*database_d,*image, sizeW, sizeH, *recognized_index_d); // cublasIsamin - index of minimum magnitude element recognized_index_h = cublasIsamin (sizeW, (*database_d), 1); if (cudaPeekAtLastError() != cudaSuccess) { printf("kernel launch error: %s\n", cudaGetErrorString(cudaGetLastError())); } 
cudaThreadSynchronize(); return(recognized_index_h); }

/*==================================================================================================
 * Recognition
 *
 * parameters
 *	single pointer, type char = inputimage
 *	double pointer, type eigen_type = mean_d
 *	double pointer, type eigen_type = projectedimages
 *	double pointer, type eigen_type = eigenfacesT
 *	double pointer, type eigen_type = projectedtrainimages
 *	single pointer, type long int = images
 *	single pointer, type long int = imgsize
 *	single pointer, type long int = facessize
 *	double pointer, type eigen_type = database_d
 *	double pointer, type eigen_type = image_d
 *	double pointer, type int = recognized_index_d
 *	double pointer, type Pixel = test_image_d
 *	double pointer, type eigen_type = test_image_d2
 *	double pointer, type eigen_type = test_image_norm
 *
 * returns
 *	N/A
 *
 * Description:
 *
 * THIS FUNCTION CALLS
 *	ppm_image_constructor (ppm.cu)
 *	cudasafe (pca_host.cu)
 *	k_grayscale_normalize (pca_host.cu)
 *	k_project_image (pca_host.cu)
 *	k_project_image_collect (pca_host.cu)
 *	match_image (pca_host.cu)
 *	ppm_image_destructor (ppm.cu)
 *
 * THIS FUNCTION IS CALLED BY
 *	main (pca.cu)
 *
 */
void Recognition(char *inputimage, eigen_type **mean_d, eigen_type **projectedimages, eigen_type **eigenfacesT,
                 eigen_type **projectedtrainimages, long int *images, long int *imgsize, long int *facessize,
                 eigen_type **database_d, eigen_type **image_d, int **recognized_index_d, Pixel** test_image_d,
                 eigen_type **test_image_d2, eigen_type **test_image_norm)
{
	char outputtext[256];   /* message buffer; must be large enough for the status strings built below */
	int min_index;
	PPMImage *testimage;

	/* timing variables */
	cudaEvent_t start, stop;
	float elapsedTime;
	int blocksize=32;

	printf("%s\n", inputimage);

	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);

	/* read in test image */
	testimage = ppm_image_constructor(inputimage);
//	grayscale(testimage);

	/* making a row-optimized block size (it's really 256 columns long per block, but 1 row tall) */
	dim3 dimBlock( blocksize * blocksize, 1);

	/* test images in current database are */
	dim3 dimGrid( ceil(float(testimage->width)/float(dimBlock.x)), ceil(float(testimage->height)/float(dimBlock.y)));
	dim3 dimGrid2( ceil(float(*imgsize)/float(dimBlock.x)), ceil(float(*facessize)/float(dimBlock.y)));
	size_t shmsize = size_t(dimBlock.x * dimBlock.y * dimBlock.z) * sizeof(float);

	/* copy to device memory */
	snprintf(outputtext, sizeof(outputtext), "Failed to copy host->device for test image1!");
	cudasafe(cudaMemcpy((*test_image_d),(testimage->pixels),sizeof(Pixel)*testimage->height*testimage->width,cudaMemcpyHostToDevice), outputtext);
//	cudasafe(cudaMemcpy((*test_image_d),(testimage->pixels),sizeof(Pixel)*testimage->height*testimage->width,cudaMemcpyHostToDevice), "Failed to copy host->device for mean vector!");

	k_grayscale_normalize<<<dimGrid, dimBlock>>>(*test_image_d, testimage->height, testimage->width, *mean_d, *test_image_norm);

//	projectedtestimage = (eigen_type *)malloc(sizeof(eigen_type) * (*facessize));
//	cudasafe(cudaMemcpy((projectedtestimage),(*test_image_norm), sizeof(eigen_type)*20,cudaMemcpyDeviceToHost), "Failed to copy device->host for pixels!");

	k_project_image<<<dimGrid2, dimBlock, shmsize>>>(*test_image_norm,*test_image_d2, *imgsize, *facessize, *eigenfacesT);
	k_project_image_collect<<<dimGrid2, dimBlock, shmsize>>>(*test_image_d2, *facessize, (*imgsize/(blocksize*blocksize)), 0);

//	cudasafe(cudaMemcpy((projectedtestimage),(*test_image_d2), sizeof(eigen_type)*((*facessize)),cudaMemcpyDeviceToHost), "Failed to copy device->host for test image!");
//	for(i = 0; i<10; i++) {
//		printf("projectedtestimage: %le\n", projectedtestimage[i]);
//	}

	/* project test image (ProjectedTestImage = eigenfacesT * NormalizedInputImage) */
	/* perform the matching in cuda */
	min_index = match_image(projectedtrainimages, test_image_d2, (*facessize), *images, database_d, recognized_index_d) - 1;

	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsedTime,start,stop);
	printf("\nOverall Speed:\t\t\t\t%lf (ms)\n", elapsedTime);

	snprintf(outputtext, sizeof(outputtext), "%s matches image index %d.ppm\n", inputimage, min_index + 1);
	printf("%s", outputtext);

	cudaEventDestroy(start);
	cudaEventDestroy(stop);

//	free(projectedtestimage);
//	free(testimage_normalized);
	ppm_image_destructor(testimage, 1);
	return;
}
2fb8be20ae6848d99a015776d936655c7fd417dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Written by Vasily Volkov. // Copyright (c) 2008-2009, The Regents of the University of California. // All rights reserved. #include "codelets.h" __global__ void IFFT64_device( float2 *work ) { int tid = threadIdx.x; int hi = tid>>3; int lo = tid&7; work += (blockIdx.y * gridDim.x + blockIdx.x) * 512 + tid; // // no sync in transpose is needed here if warpSize >= 32 // since the permutations are within-warp // float2 a[8]; __shared__ float smem[64*9]; load<8>( a, work, 64 ); IFFT8( a ); itwiddle<8>( a, lo, 64 ); transpose_br<8>( a, &smem[hi*8*9+lo*9], 1, &smem[hi*8*9+lo], 9, 0 ); IFFT8( a ); store<8>( a, work, 64 ); } extern "C" void IFFT64( float2 *work, int batch ) { hipLaunchKernelGGL(( IFFT64_device), dim3(grid2D(batch/8)), dim3(64) , 0, 0, work ); }
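Apart from the added hip/hip_runtime.h include and the hipify banner comment, the only change in this .hip file relative to the CUDA original that follows is the launch site: the triple-chevron syntax becomes a hipLaunchKernelGGL call carrying the same grid, block, shared-memory and stream arguments, followed by the kernel arguments. A generic sketch of that mapping (my_kernel and launch are made-up names, not part of the file above):

#include <hip/hip_runtime.h>

__global__ void my_kernel(float *x, int n)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n) x[i] += 1.0f;
}

void launch(float *d_x, int n, hipStream_t stream)
{
	dim3 grid((n + 255) / 256), block(256);
	size_t shmem = 0;

	/* CUDA / nvcc form:
	 *     my_kernel<<< grid, block, shmem, stream >>>(d_x, n);
	 * HIP form emitted by hipify: kernel, grid, block, dynamic shared memory size,
	 * stream, and then the kernel arguments.                                       */
	hipLaunchKernelGGL(my_kernel, grid, block, shmem, stream, d_x, n);
}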
2fb8be20ae6848d99a015776d936655c7fd417dd.cu
// Written by Vasily Volkov. // Copyright (c) 2008-2009, The Regents of the University of California. // All rights reserved. #include "codelets.h" __global__ void IFFT64_device( float2 *work ) { int tid = threadIdx.x; int hi = tid>>3; int lo = tid&7; work += (blockIdx.y * gridDim.x + blockIdx.x) * 512 + tid; // // no sync in transpose is needed here if warpSize >= 32 // since the permutations are within-warp // float2 a[8]; __shared__ float smem[64*9]; load<8>( a, work, 64 ); IFFT8( a ); itwiddle<8>( a, lo, 64 ); transpose_br<8>( a, &smem[hi*8*9+lo*9], 1, &smem[hi*8*9+lo], 9, 0 ); IFFT8( a ); store<8>( a, work, 64 ); } extern "C" void IFFT64( float2 *work, int batch ) { IFFT64_device<<< grid2D(batch/8), 64 >>>( work ); }
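Each 64-thread block of IFFT64_device transforms eight independent 64-point signals (512 float2 values), which is why the host hands batch/8 blocks to grid2D. grid2D itself lives in codelets.h and is not shown here, so the sketch below uses a simplified stand-in; it is only an illustration of how the kernel might be driven, under the assumption that batch is a multiple of 8.

#include <cuda_runtime.h>

__global__ void IFFT64_device( float2 *work );   /* defined above */

/* simplified stand-in for grid2D(): assumes `blocks` fits in gridDim.x; the real helper
 * presumably also spills into gridDim.y, which the kernel's
 * (blockIdx.y * gridDim.x + blockIdx.x) indexing already supports */
static dim3 grid2D_sketch(int blocks)
{
	return dim3(blocks, 1);
}

void run_ifft64(float2 *d_work, int batch)       /* batch assumed to be a multiple of 8 */
{
	IFFT64_device<<< grid2D_sketch(batch / 8), 64 >>>(d_work);
}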
9910b6d6ca5e45f25552eaa150b02ba73e9da68d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Codes are based on: * * <https://github.com/pytorch/pytorch/blob/master/caffe2/operators/group_norm_op.cu> * * ------------------------------------------------------------ */ #ifdef WITH_CUDA #include "core/mixedmem.h" #include "utils/op_kernel.h" #include "utils/math_utils.h" #include "utils/cub_device.h" namespace dragon { namespace kernel { /*! BatchNormBackwardTraining <T = ?, Device = CUDA> */ template <typename Tx, typename Tp, StorageOrder kOrder> __global__ void _BatchNormInternalGrad( const int N, const int C, const int S, const Tx* x, const Tp* mu, const Tp* rsig, const Tp* gamma, const Tx* dy, Tp* ds, Tp* db, Tp* dgamma, Tp* dbeta) { const int outer_dim = N * S; __shared__ typename BlockReduce<Tp>::TempStorage ds_storage; __shared__ typename BlockReduce<Tp>::TempStorage db_storage; __shared__ typename BlockReduce<Tp>::TempStorage dga_storage; __shared__ typename BlockReduce<Tp>::TempStorage dbe_storage; CUDA_2D_KERNEL_LOOP1(i, C) { Tp ds_val = 0, db_val = 0; Tp dga_val = 0, dbe_val = 0; CUDA_2D_KERNEL_LOOP2(j, outer_dim) { const int idx = kOrder == StorageOrder::NCHW ? (j / S * C + i) * S + j % S : j * C + i; #if __CUDA_ARCH__ >= 350 ds_val += __ldg(gamma + i) * __ldg(dy + idx) * __ldg(x + idx); db_val += __ldg(gamma + i) * __ldg(dy + idx); dga_val += __ldg(dy + idx) *( __ldg(x + idx) - __ldg(mu + i) ) * __ldg(rsig + i); dbe_val += __ldg(dy + idx); #else ds_val += gamma[i] * dy[idx] * x[idx]; db_val += gamma[i] * dy[idx]; dga_val += dy[idx] * (x[idx] - mu[i]) * rsig[i]; dbe_val += dy[idx]; #endif } ds_val = BlockReduce<Tp>(ds_storage).Reduce(ds_val, hipcub::Sum()); db_val = BlockReduce<Tp>(db_storage).Reduce(db_val, hipcub::Sum()); dga_val = BlockReduce<Tp>(dga_storage).Reduce(dga_val, hipcub::Sum()); dbe_val = BlockReduce<Tp>(dbe_storage).Reduce(dbe_val, hipcub::Sum()); if (threadIdx.x == 0) { ds[i] = ds_val; db[i] = db_val; dgamma[i] = dga_val; dbeta[i] = dbe_val; } } } template <typename Tx, typename Tp, StorageOrder kOrder> __global__ void _BatchNormTrainingGrad( const int nthreads, const int N, const int C, const int S, const Tx* x, const Tp* mu, const Tp* rsig, const Tp* gamma, const Tp* ds, const Tp* db, const Tx* dy, Tx* dx) { const Tp denom = Tp(1) / static_cast<Tp>(N * S); CUDA_1D_KERNEL_LOOP(i, nthreads) { const int i_param = kOrder == StorageOrder::NCHW ? (i / S) % C : i % C; #if __CUDA_ARCH__ >= 350 const Tp u = ( __ldg(db + i_param) * __ldg(mu + i_param) - __ldg(ds + i_param) ) * (__ldg(x + i) - __ldg(mu + i_param) ) * utils::math::Cube<Tp>(__ldg(rsig + i_param)); const Tp v = __ldg(db + i_param) * __ldg(rsig + i_param); dx[i] = __ldg(gamma + i_param) * __ldg(dy + i) * __ldg(rsig + i_param) + (u - v) * denom; #else const Tp u = (db[i_param] * mu[i_param] - ds[i_param]) * (x[i] - mu[i_param]) * utils::math::Cube<Tp>(rsig[i_param]); const Tp v = db[i_param] * rsig[i_param]; dx[i] = gamma[i_param] * dy[i] * rsig[i_param] + (u - v) * denom; #endif } } /*! 
BatchNormBackwardInference <T = ?, Device = CUDA> */ template <typename Tx, typename Tp, StorageOrder kOrder> __global__ void _BatchNormWGrad( const int N, const int C, const int S, const Tx* x, const Tp* mu, const Tp* rsig, const Tx* dy, Tp* dgamma, Tp* dbeta) { const int outer_dim = N * S; __shared__ typename BlockReduce<Tp>::TempStorage dg_storage; __shared__ typename BlockReduce<Tp>::TempStorage db_storage; CUDA_2D_KERNEL_LOOP1(i, C) { Tp dg_val = 0, db_val = 0; CUDA_2D_KERNEL_LOOP2(j, outer_dim) { const int idx = kOrder == StorageOrder::NCHW ? (j / S * C + i) * S + j % S : j * C + i; #if __CUDA_ARCH__ >= 350 dg_val += __ldg(dy + idx) * ( __ldg(x + idx) - __ldg(mu + i) ) * __ldg(rsig + i); db_val += __ldg(dy + idx); #else dg_val += dy[idx] * (x[idx] - mu[i]) * rsig[i]; db_val += dy[idx]; #endif } dg_val = BlockReduce<Tp>(dg_storage).Reduce(dg_val, hipcub::Sum()); db_val = BlockReduce<Tp>(db_storage).Reduce(db_val, hipcub::Sum()); if (threadIdx.x == 0) { dgamma[i] = dg_val; dbeta[i] = db_val; } } } template <typename Tx, typename Tp, StorageOrder kOrder> __global__ void _BatchNormInferenceGrad( const int nthreads, const int C, const int S, const Tp* rsig, const Tp* gamma, const Tx* dy, Tx* dx) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const int i_param = kOrder == StorageOrder::NCHW ? (i / S) % C : i % C; #if __CUDA_ARCH__ >= 350 dx[i] = __ldg(gamma + i_param) * __ldg(dy + i) * __ldg(rsig + i_param); #else dx[i] = gamma[i_param] * dy[i] * rsig[i_param]; #endif } } /*! Kernel Launchers */ #define DEFINE_BACKWARD_KERNEL_LAUNCHER(Tx, Tp) \ template <> void BatchNormBackwardTraining<Tx, Tp, CUDAContext>( \ const int N, \ const int C, \ const int S, \ const string& data_format, \ const Tx* x, \ const Tp* mu, \ const Tp* rsig, \ const Tp* gamma, \ const Tx* dy, \ Tp* ds, \ Tp* db, \ Tx* dx, \ Tp* dgamma, \ Tp* dbeta, \ CUDAContext* ctx) { \ auto nthreads = N * C * S; \ if (data_format == "NCHW") { \ _BatchNormInternalGrad<Tx, Tp, StorageOrder::NCHW> \ << < CUDA_2D_BLOCKS(C), CUDA_THREADS, \ 0, ctx->cuda_stream() >> > \ (N, C, S, x, mu, rsig, gamma, dy, \ ds, db, dgamma, dbeta); \ _BatchNormTrainingGrad<Tx, Tp, StorageOrder::NCHW> \ << < CUDA_BLOCKS(nthreads), CUDA_THREADS, \ 0, ctx->cuda_stream() >> > \ (nthreads, N, C, S, x, mu, rsig, gamma, ds, db, dy, dx); \ } else if (data_format == "NHWC") { \ _BatchNormInternalGrad<Tx, Tp, StorageOrder::NHWC> \ << < CUDA_2D_BLOCKS(C), CUDA_THREADS, \ 0, ctx->cuda_stream() >> > \ (N, C, S, x, mu, rsig, gamma, dy, \ ds, db, dgamma, dbeta); \ _BatchNormTrainingGrad<Tx, Tp, StorageOrder::NHWC> \ << < CUDA_BLOCKS(nthreads), CUDA_THREADS, \ 0, ctx->cuda_stream() >> > \ (nthreads, N, C, S, x, mu, rsig, gamma, ds, db, dy, dx); \ } \ } \ template <> void BatchNormBackwardInference<Tx, Tp, CUDAContext>( \ const int N, \ const int C, \ const int S, \ const string& data_format, \ const Tx* x, \ const Tp* mu, \ const Tp* rsig, \ const Tp* gamma, \ const Tx* dy, \ Tx* dx, \ Tp* dgamma, \ Tp* dbeta, \ CUDAContext* ctx) { \ auto nthreads = N * C * S; \ if (data_format == "NCHW") { \ if (dgamma != nullptr) { \ _BatchNormWGrad<Tx, Tp, StorageOrder::NCHW> \ << < CUDA_2D_BLOCKS(C), CUDA_THREADS, \ 0, ctx->cuda_stream() >> > \ (N, C, S, x, mu, rsig, dy, dgamma, dbeta); \ } \ _BatchNormInferenceGrad<Tx, Tp, StorageOrder::NCHW> \ << < CUDA_BLOCKS(nthreads), CUDA_THREADS, \ 0, ctx->cuda_stream() >> > \ (nthreads, C, S, rsig, gamma, dy, dx); \ } else if (data_format == "NHWC") { \ if (dgamma != nullptr) { \ _BatchNormWGrad<Tx, Tp, StorageOrder::NHWC> \ << < CUDA_2D_BLOCKS(C), 
CUDA_THREADS, \ 0, ctx->cuda_stream() >> > \ (N, C, S, x, mu, rsig, dy, dgamma, dbeta); \ } \ _BatchNormInferenceGrad<Tx, Tp, StorageOrder::NHWC> \ << < CUDA_BLOCKS(nthreads), CUDA_THREADS, \ 0, ctx->cuda_stream() >> > \ (nthreads, C, S, rsig, gamma, dy, dx); \ } \ } DEFINE_BACKWARD_KERNEL_LAUNCHER(float, float); #undef DEFINE_BACKWARD_KERNEL_LAUNCHER } // namespace kernel } // namespace dragon #endif // WITH_CUDA
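Written out, the per-channel sums accumulated by _BatchNormInternalGrad and the per-element update applied by _BatchNormTrainingGrad above are, with $c$ the channel of element $i$, $r_c$ the reciprocal standard deviation (rsig), and $M = N \cdot S$ elements per channel:

$$ds_c = \sum_{i \in c} \gamma_c\, dy_i\, x_i, \qquad db_c = \sum_{i \in c} \gamma_c\, dy_i, \qquad d\gamma_c = \sum_{i \in c} dy_i\,(x_i - \mu_c)\, r_c, \qquad d\beta_c = \sum_{i \in c} dy_i,$$

$$dx_i = \gamma_c\, r_c\, dy_i + \frac{(db_c\,\mu_c - ds_c)\,(x_i - \mu_c)\, r_c^{3} \;-\; db_c\, r_c}{M}.$$

The inference path computes the same $d\gamma_c$ and $d\beta_c$ (via _BatchNormWGrad) and reduces the input gradient to $dx_i = \gamma_c\, r_c\, dy_i$, since the batch statistics are treated as constants there.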
9910b6d6ca5e45f25552eaa150b02ba73e9da68d.cu
/*! * Codes are based on: * * <https://github.com/pytorch/pytorch/blob/master/caffe2/operators/group_norm_op.cu> * * ------------------------------------------------------------ */ #ifdef WITH_CUDA #include "core/mixedmem.h" #include "utils/op_kernel.h" #include "utils/math_utils.h" #include "utils/cub_device.h" namespace dragon { namespace kernel { /*! BatchNormBackwardTraining <T = ?, Device = CUDA> */ template <typename Tx, typename Tp, StorageOrder kOrder> __global__ void _BatchNormInternalGrad( const int N, const int C, const int S, const Tx* x, const Tp* mu, const Tp* rsig, const Tp* gamma, const Tx* dy, Tp* ds, Tp* db, Tp* dgamma, Tp* dbeta) { const int outer_dim = N * S; __shared__ typename BlockReduce<Tp>::TempStorage ds_storage; __shared__ typename BlockReduce<Tp>::TempStorage db_storage; __shared__ typename BlockReduce<Tp>::TempStorage dga_storage; __shared__ typename BlockReduce<Tp>::TempStorage dbe_storage; CUDA_2D_KERNEL_LOOP1(i, C) { Tp ds_val = 0, db_val = 0; Tp dga_val = 0, dbe_val = 0; CUDA_2D_KERNEL_LOOP2(j, outer_dim) { const int idx = kOrder == StorageOrder::NCHW ? (j / S * C + i) * S + j % S : j * C + i; #if __CUDA_ARCH__ >= 350 ds_val += __ldg(gamma + i) * __ldg(dy + idx) * __ldg(x + idx); db_val += __ldg(gamma + i) * __ldg(dy + idx); dga_val += __ldg(dy + idx) *( __ldg(x + idx) - __ldg(mu + i) ) * __ldg(rsig + i); dbe_val += __ldg(dy + idx); #else ds_val += gamma[i] * dy[idx] * x[idx]; db_val += gamma[i] * dy[idx]; dga_val += dy[idx] * (x[idx] - mu[i]) * rsig[i]; dbe_val += dy[idx]; #endif } ds_val = BlockReduce<Tp>(ds_storage).Reduce(ds_val, cub::Sum()); db_val = BlockReduce<Tp>(db_storage).Reduce(db_val, cub::Sum()); dga_val = BlockReduce<Tp>(dga_storage).Reduce(dga_val, cub::Sum()); dbe_val = BlockReduce<Tp>(dbe_storage).Reduce(dbe_val, cub::Sum()); if (threadIdx.x == 0) { ds[i] = ds_val; db[i] = db_val; dgamma[i] = dga_val; dbeta[i] = dbe_val; } } } template <typename Tx, typename Tp, StorageOrder kOrder> __global__ void _BatchNormTrainingGrad( const int nthreads, const int N, const int C, const int S, const Tx* x, const Tp* mu, const Tp* rsig, const Tp* gamma, const Tp* ds, const Tp* db, const Tx* dy, Tx* dx) { const Tp denom = Tp(1) / static_cast<Tp>(N * S); CUDA_1D_KERNEL_LOOP(i, nthreads) { const int i_param = kOrder == StorageOrder::NCHW ? (i / S) % C : i % C; #if __CUDA_ARCH__ >= 350 const Tp u = ( __ldg(db + i_param) * __ldg(mu + i_param) - __ldg(ds + i_param) ) * (__ldg(x + i) - __ldg(mu + i_param) ) * utils::math::Cube<Tp>(__ldg(rsig + i_param)); const Tp v = __ldg(db + i_param) * __ldg(rsig + i_param); dx[i] = __ldg(gamma + i_param) * __ldg(dy + i) * __ldg(rsig + i_param) + (u - v) * denom; #else const Tp u = (db[i_param] * mu[i_param] - ds[i_param]) * (x[i] - mu[i_param]) * utils::math::Cube<Tp>(rsig[i_param]); const Tp v = db[i_param] * rsig[i_param]; dx[i] = gamma[i_param] * dy[i] * rsig[i_param] + (u - v) * denom; #endif } } /*! BatchNormBackwardInference <T = ?, Device = CUDA> */ template <typename Tx, typename Tp, StorageOrder kOrder> __global__ void _BatchNormWGrad( const int N, const int C, const int S, const Tx* x, const Tp* mu, const Tp* rsig, const Tx* dy, Tp* dgamma, Tp* dbeta) { const int outer_dim = N * S; __shared__ typename BlockReduce<Tp>::TempStorage dg_storage; __shared__ typename BlockReduce<Tp>::TempStorage db_storage; CUDA_2D_KERNEL_LOOP1(i, C) { Tp dg_val = 0, db_val = 0; CUDA_2D_KERNEL_LOOP2(j, outer_dim) { const int idx = kOrder == StorageOrder::NCHW ? 
(j / S * C + i) * S + j % S : j * C + i; #if __CUDA_ARCH__ >= 350 dg_val += __ldg(dy + idx) * ( __ldg(x + idx) - __ldg(mu + i) ) * __ldg(rsig + i); db_val += __ldg(dy + idx); #else dg_val += dy[idx] * (x[idx] - mu[i]) * rsig[i]; db_val += dy[idx]; #endif } dg_val = BlockReduce<Tp>(dg_storage).Reduce(dg_val, cub::Sum()); db_val = BlockReduce<Tp>(db_storage).Reduce(db_val, cub::Sum()); if (threadIdx.x == 0) { dgamma[i] = dg_val; dbeta[i] = db_val; } } } template <typename Tx, typename Tp, StorageOrder kOrder> __global__ void _BatchNormInferenceGrad( const int nthreads, const int C, const int S, const Tp* rsig, const Tp* gamma, const Tx* dy, Tx* dx) { CUDA_1D_KERNEL_LOOP(i, nthreads) { const int i_param = kOrder == StorageOrder::NCHW ? (i / S) % C : i % C; #if __CUDA_ARCH__ >= 350 dx[i] = __ldg(gamma + i_param) * __ldg(dy + i) * __ldg(rsig + i_param); #else dx[i] = gamma[i_param] * dy[i] * rsig[i_param]; #endif } } /*! Kernel Launchers */ #define DEFINE_BACKWARD_KERNEL_LAUNCHER(Tx, Tp) \ template <> void BatchNormBackwardTraining<Tx, Tp, CUDAContext>( \ const int N, \ const int C, \ const int S, \ const string& data_format, \ const Tx* x, \ const Tp* mu, \ const Tp* rsig, \ const Tp* gamma, \ const Tx* dy, \ Tp* ds, \ Tp* db, \ Tx* dx, \ Tp* dgamma, \ Tp* dbeta, \ CUDAContext* ctx) { \ auto nthreads = N * C * S; \ if (data_format == "NCHW") { \ _BatchNormInternalGrad<Tx, Tp, StorageOrder::NCHW> \ << < CUDA_2D_BLOCKS(C), CUDA_THREADS, \ 0, ctx->cuda_stream() >> > \ (N, C, S, x, mu, rsig, gamma, dy, \ ds, db, dgamma, dbeta); \ _BatchNormTrainingGrad<Tx, Tp, StorageOrder::NCHW> \ << < CUDA_BLOCKS(nthreads), CUDA_THREADS, \ 0, ctx->cuda_stream() >> > \ (nthreads, N, C, S, x, mu, rsig, gamma, ds, db, dy, dx); \ } else if (data_format == "NHWC") { \ _BatchNormInternalGrad<Tx, Tp, StorageOrder::NHWC> \ << < CUDA_2D_BLOCKS(C), CUDA_THREADS, \ 0, ctx->cuda_stream() >> > \ (N, C, S, x, mu, rsig, gamma, dy, \ ds, db, dgamma, dbeta); \ _BatchNormTrainingGrad<Tx, Tp, StorageOrder::NHWC> \ << < CUDA_BLOCKS(nthreads), CUDA_THREADS, \ 0, ctx->cuda_stream() >> > \ (nthreads, N, C, S, x, mu, rsig, gamma, ds, db, dy, dx); \ } \ } \ template <> void BatchNormBackwardInference<Tx, Tp, CUDAContext>( \ const int N, \ const int C, \ const int S, \ const string& data_format, \ const Tx* x, \ const Tp* mu, \ const Tp* rsig, \ const Tp* gamma, \ const Tx* dy, \ Tx* dx, \ Tp* dgamma, \ Tp* dbeta, \ CUDAContext* ctx) { \ auto nthreads = N * C * S; \ if (data_format == "NCHW") { \ if (dgamma != nullptr) { \ _BatchNormWGrad<Tx, Tp, StorageOrder::NCHW> \ << < CUDA_2D_BLOCKS(C), CUDA_THREADS, \ 0, ctx->cuda_stream() >> > \ (N, C, S, x, mu, rsig, dy, dgamma, dbeta); \ } \ _BatchNormInferenceGrad<Tx, Tp, StorageOrder::NCHW> \ << < CUDA_BLOCKS(nthreads), CUDA_THREADS, \ 0, ctx->cuda_stream() >> > \ (nthreads, C, S, rsig, gamma, dy, dx); \ } else if (data_format == "NHWC") { \ if (dgamma != nullptr) { \ _BatchNormWGrad<Tx, Tp, StorageOrder::NHWC> \ << < CUDA_2D_BLOCKS(C), CUDA_THREADS, \ 0, ctx->cuda_stream() >> > \ (N, C, S, x, mu, rsig, dy, dgamma, dbeta); \ } \ _BatchNormInferenceGrad<Tx, Tp, StorageOrder::NHWC> \ << < CUDA_BLOCKS(nthreads), CUDA_THREADS, \ 0, ctx->cuda_stream() >> > \ (nthreads, C, S, rsig, gamma, dy, dx); \ } \ } DEFINE_BACKWARD_KERNEL_LAUNCHER(float, float); #undef DEFINE_BACKWARD_KERNEL_LAUNCHER } // namespace kernel } // namespace dragon #endif // WITH_CUDA
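The reduction kernels above all share one shape: a block works on a channel, each thread strides over the N*S elements belonging to that channel (through the CUDA_2D_KERNEL_LOOP macros), and a cub BlockReduce collapses the per-thread partial sums before thread 0 writes the result. A reduced-to-essentials sketch of that pattern, using cub directly and illustrative names rather than the project's macros (it also assumes exactly one block per channel, whereas the macros allow a grid-stride over channels):

#include <cub/cub.cuh>

template <int BLOCK_THREADS>
__global__ void per_channel_sum(const float* dy, int N, int C, int S, float* dbeta)
{
	typedef cub::BlockReduce<float, BLOCK_THREADS> BlockReduce;
	__shared__ typename BlockReduce::TempStorage temp;

	const int c = blockIdx.x;            /* one channel per block */
	const int count = N * S;             /* elements belonging to this channel */

	float partial = 0.f;
	for (int j = threadIdx.x; j < count; j += BLOCK_THREADS) {
		const int idx = (j / S * C + c) * S + j % S;   /* NCHW indexing, as in the kernels above */
		partial += dy[idx];
	}

	const float total = BlockReduce(temp).Sum(partial);
	if (threadIdx.x == 0) dbeta[c] = total;            /* thread 0 writes the channel result */
}

/* launched as: per_channel_sum<256><<<C, 256>>>(d_dy, N, C, S, d_dbeta); */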
fc723210a0425859d063fd6df9d113264b3016d1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (C) 2018 Schreibikus https://github.com/schreibikus * License: http://www.gnu.org/licenses/gpl.html GPL version 3 or higher */ texture<uchar4, 2> argb_tex; /* This function based on Subsample_Bilinear_uchar4 function from vf_scale_cuda.cu */ extern "C" __global__ void resizeARGB(uchar4 *dst, int dst_width, int dst_height, int dst_pitch, float hscale, float vscale) { int xo = blockIdx.x * blockDim.x + threadIdx.x; int yo = blockIdx.y * blockDim.y + threadIdx.y; if ((yo < dst_height) && (xo < dst_width)) { float xi = (xo + 0.5f) * hscale; float yi = (yo + 0.5f) * vscale; // 3-tap filter weights are {wh,1.0,wh} and {wv,1.0,wv} float wh = min(max(0.5f * (hscale - 1.0f), 0.0f), 1.0f); float wv = min(max(0.5f * (vscale - 1.0f), 0.0f), 1.0f); // Convert weights to two bilinear weights -> {wh,1.0,wh} -> {wh,0.5,0} + {0,0.5,wh} float dx = wh / (0.5f + wh); float dy = wv / (0.5f + wv); uchar4 c0 = tex2D(argb_tex, xi-dx, yi-dy); uchar4 c1 = tex2D(argb_tex, xi+dx, yi-dy); uchar4 c2 = tex2D(argb_tex, xi-dx, yi+dy); uchar4 c3 = tex2D(argb_tex, xi+dx, yi+dy); int4 res; res.x = ((int)c0.x+(int)c1.x+(int)c2.x+(int)c3.x+2) >> 2; res.y = ((int)c0.y+(int)c1.y+(int)c2.y+(int)c3.y+2) >> 2; res.z = ((int)c0.z+(int)c1.z+(int)c2.z+(int)c3.z+2) >> 2; res.w = ((int)c0.w+(int)c1.w+(int)c2.w+(int)c3.w+2) >> 2; *((uchar4*)((unsigned char*)dst + yo * dst_pitch) + xo) = make_uchar4( (unsigned char)res.x, (unsigned char)res.y, (unsigned char)res.z, (unsigned char)res.w); } }
fc723210a0425859d063fd6df9d113264b3016d1.cu
/* * Copyright (C) 2018 Schreibikus https://github.com/schreibikus * License: http://www.gnu.org/licenses/gpl.html GPL version 3 or higher */ texture<uchar4, 2> argb_tex; /* This function based on Subsample_Bilinear_uchar4 function from vf_scale_cuda.cu */ extern "C" __global__ void resizeARGB(uchar4 *dst, int dst_width, int dst_height, int dst_pitch, float hscale, float vscale) { int xo = blockIdx.x * blockDim.x + threadIdx.x; int yo = blockIdx.y * blockDim.y + threadIdx.y; if ((yo < dst_height) && (xo < dst_width)) { float xi = (xo + 0.5f) * hscale; float yi = (yo + 0.5f) * vscale; // 3-tap filter weights are {wh,1.0,wh} and {wv,1.0,wv} float wh = min(max(0.5f * (hscale - 1.0f), 0.0f), 1.0f); float wv = min(max(0.5f * (vscale - 1.0f), 0.0f), 1.0f); // Convert weights to two bilinear weights -> {wh,1.0,wh} -> {wh,0.5,0} + {0,0.5,wh} float dx = wh / (0.5f + wh); float dy = wv / (0.5f + wv); uchar4 c0 = tex2D(argb_tex, xi-dx, yi-dy); uchar4 c1 = tex2D(argb_tex, xi+dx, yi-dy); uchar4 c2 = tex2D(argb_tex, xi-dx, yi+dy); uchar4 c3 = tex2D(argb_tex, xi+dx, yi+dy); int4 res; res.x = ((int)c0.x+(int)c1.x+(int)c2.x+(int)c3.x+2) >> 2; res.y = ((int)c0.y+(int)c1.y+(int)c2.y+(int)c3.y+2) >> 2; res.z = ((int)c0.z+(int)c1.z+(int)c2.z+(int)c3.z+2) >> 2; res.w = ((int)c0.w+(int)c1.w+(int)c2.w+(int)c3.w+2) >> 2; *((uchar4*)((unsigned char*)dst + yo * dst_pitch) + xo) = make_uchar4( (unsigned char)res.x, (unsigned char)res.y, (unsigned char)res.z, (unsigned char)res.w); } }
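The weight computation in resizeARGB folds a 3-tap tent filter {wh, 1, wh} into two fetch offsets intended for the texture unit's bilinear filtering, as the kernel comment says; how argb_tex is actually bound (filter mode included) is host-side code not shown in this file. A worked example for a 2:1 downscale (hscale = vscale = 2): wh = min(max(0.5*(2 - 1), 0), 1) = 0.5 and dx = wh / (0.5 + wh) = 0.5, so for output pixel xo the source coordinate is xi = (xo + 0.5)*2 = 2*xo + 1 and the two x-fetches land at 2*xo + 0.5 and 2*xo + 1.5, i.e. on the centres of texels 2*xo and 2*xo + 1 (and likewise in y). The final ((c0+c1+c2+c3+2) >> 2) is then a rounded average of the 2x2 source block. For hscale = 1 the weights collapse to wh = 0 and dx = 0, all four fetches sample the same position, and the kernel degenerates to a copy.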
aijcusparse.hip
// !!! This is a file automatically generated by hipify!!! /* Defines the basic matrix operations for the AIJ (compressed row) matrix storage format using the CUSPARSE library, */ #define PETSC_SKIP_SPINLOCK #include <petscconf.h> #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ #include <../src/mat/impls/sbaij/seq/sbaij.h> #include <../src/vec/vec/impls/dvecimpl.h> #include <petsc/private/vecimpl.h> #undef VecType #include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h> const char *const MatCUSPARSEStorageFormats[] = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0}; static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec); static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat); static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec); static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec); static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**); static PetscErrorCode Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**); static PetscErrorCode Mat_SeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat); static PetscErrorCode Mat_SeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**); static PetscErrorCode Mat_SeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**); #undef __FUNCT__ #define __FUNCT__ "MatCUSPARSESetStream" PetscErrorCode MatCUSPARSESetStream(Mat A,const hipStream_t stream) { hipsparseStatus_t stat; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; cusparsestruct->stream = stream; stat = hipsparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUDA(stat); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatCUSPARSESetHandle" PetscErrorCode MatCUSPARSESetHandle(Mat A,const hipsparseHandle_t handle) { hipsparseStatus_t stat; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; if (cusparsestruct->handle) { stat = hipsparseDestroy(cusparsestruct->handle);CHKERRCUDA(stat); } cusparsestruct->handle = handle; stat = hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUDA(stat); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatCUSPARSEClearHandle" PetscErrorCode MatCUSPARSEClearHandle(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; if (cusparsestruct->handle) cusparsestruct->handle = 0; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatFactorGetSolverPackage_seqaij_cusparse" PetscErrorCode 
MatFactorGetSolverPackage_seqaij_cusparse(Mat A,const MatSolverPackage *type) { PetscFunctionBegin; *type = MATSOLVERCUSPARSE; PetscFunctionReturn(0); } /*MC MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices on a single GPU of type, seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp. Currently supported algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) results in poorer performance in the triangular solves. Full LU, and Cholesky decompositions can be solved through the CUSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these algorithms are not recommended. This class does NOT support direct solver operations. Level: beginner .seealso: PCFactorSetMatSolverPackage(), MatSolverPackage, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation M*/ #undef __FUNCT__ #define __FUNCT__ "MatGetFactor_seqaijcusparse_cusparse" PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B) { PetscErrorCode ierr; PetscInt n = A->rmap->n; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr); (*B)->factortype = ftype; ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr); ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) { ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr); (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE; } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) { (*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE; } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types"); ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverPackage_C",MatFactorGetSolverPackage_seqaij_cusparse);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatCUSPARSESetFormat_SeqAIJCUSPARSE" PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; #if TORCH_HIP_VERSION>=4020 switch (op) { case MAT_CUSPARSE_MULT: cusparsestruct->format = format; break; case MAT_CUSPARSE_ALL: cusparsestruct->format = format; break; default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op); } #else if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ELL (Ellpack) and HYB (Hybrid) storage format require CUDA 4.2 or later."); #endif PetscFunctionReturn(0); } /*@ MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular operation. Only the MatMult operation can use different GPU storage formats for MPIAIJCUSPARSE matrices. Not Collective Input Parameters: + A - Matrix of type SEQAIJCUSPARSE . op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL. 
- format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB. The latter two require CUDA 4.2) Output Parameter: Level: intermediate .seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation @*/ #undef __FUNCT__ #define __FUNCT__ "MatCUSPARSESetFormat" PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format) { PetscErrorCode ierr; PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID,1); ierr = PetscTryMethod(A, "MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSetFromOptions_SeqAIJCUSPARSE" static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A) { PetscErrorCode ierr; MatCUSPARSEStorageFormat format; PetscBool flg; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr); ierr = PetscObjectOptionsBegin((PetscObject)A); if (A->factortype==MAT_FACTOR_NONE) { ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) { ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr); } } ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) { ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr); } ierr = PetscOptionsEnd();CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatILUFactorSymbolic_SeqAIJCUSPARSE" static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr); B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatLUFactorSymbolic_SeqAIJCUSPARSE" static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr); B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatICCFactorSymbolic_SeqAIJCUSPARSE" static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr); B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatCholeskyFactorSymbolic_SeqAIJCUSPARSE" static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr); B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSeqAIJCUSPARSEBuildILULowerTriMatrix" static PetscErrorCode 
MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscInt n = A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; hipsparseStatus_t stat; const PetscInt *ai = a->i,*aj = a->j,*vi; const MatScalar *aa = a->a,*v; PetscInt *AiLo, *AjLo; PetscScalar *AALo; PetscInt i,nz, nzLower, offset, rowOffset; PetscErrorCode ierr; PetscFunctionBegin; if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) { try { /* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */ nzLower=n+ai[n]-ai[1]; /* Allocate Space for the lower triangular matrix */ ierr = hipHostMalloc((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUDA(ierr); ierr = hipHostMalloc((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUDA(ierr); ierr = hipHostMalloc((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUDA(ierr); /* Fill the lower triangular matrix */ AiLo[0] = (PetscInt) 0; AiLo[n] = nzLower; AjLo[0] = (PetscInt) 0; AALo[0] = (MatScalar) 1.0; v = aa; vi = aj; offset = 1; rowOffset= 1; for (i=1; i<n; i++) { nz = ai[i+1] - ai[i]; /* additional 1 for the term on the diagonal */ AiLo[i] = rowOffset; rowOffset += nz+1; ierr = PetscMemcpy(&(AjLo[offset]), vi, nz*sizeof(PetscInt));CHKERRQ(ierr); ierr = PetscMemcpy(&(AALo[offset]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr); offset += nz; AjLo[offset] = (PetscInt) i; AALo[offset] = (MatScalar) 1.0; offset += 1; v += nz; vi += nz; } /* allocate space for the triangular factor information */ loTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; /* Create the matrix description */ stat = hipsparseCreateMatDescr(&loTriFactor->descr);CHKERRCUDA(stat); stat = hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat); stat = hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_LOWER);CHKERRCUDA(stat); stat = hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT);CHKERRCUDA(stat); /* Create the solve analysis information */ stat = cusparseCreateSolveAnalysisInfo(&loTriFactor->solveInfo);CHKERRCUDA(stat); /* set the operation */ loTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ loTriFactor->csrMat = new CsrMatrix; loTriFactor->csrMat->num_rows = n; loTriFactor->csrMat->num_cols = n; loTriFactor->csrMat->num_entries = nzLower; loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1); loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1); loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower); loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower); loTriFactor->csrMat->values = new THRUSTARRAY(nzLower); loTriFactor->csrMat->values->assign(AALo, AALo+nzLower); /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo);CHKERRCUDA(stat); /* assign the pointer. Is this really necessary? 
*/ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor; ierr = hipHostFree(AiLo);CHKERRCUDA(ierr); ierr = hipHostFree(AjLo);CHKERRCUDA(ierr); ierr = hipHostFree(AALo);CHKERRCUDA(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSeqAIJCUSPARSEBuildILUUpperTriMatrix" static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscInt n = A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; hipsparseStatus_t stat; const PetscInt *aj = a->j,*adiag = a->diag,*vi; const MatScalar *aa = a->a,*v; PetscInt *AiUp, *AjUp; PetscScalar *AAUp; PetscInt i,nz, nzUpper, offset; PetscErrorCode ierr; PetscFunctionBegin; if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) { try { /* next, figure out the number of nonzeros in the upper triangular matrix. */ nzUpper = adiag[0]-adiag[n]; /* Allocate Space for the upper triangular matrix */ ierr = hipHostMalloc((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(ierr); ierr = hipHostMalloc((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(ierr); ierr = hipHostMalloc((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(ierr); /* Fill the upper triangular matrix */ AiUp[0]=(PetscInt) 0; AiUp[n]=nzUpper; offset = nzUpper; for (i=n-1; i>=0; i--) { v = aa + adiag[i+1] + 1; vi = aj + adiag[i+1] + 1; /* number of elements NOT on the diagonal */ nz = adiag[i] - adiag[i+1]-1; /* decrement the offset */ offset -= (nz+1); /* first, set the diagonal elements */ AjUp[offset] = (PetscInt) i; AAUp[offset] = (MatScalar)1./v[nz]; AiUp[i] = AiUp[i+1] - (nz+1); ierr = PetscMemcpy(&(AjUp[offset+1]), vi, nz*sizeof(PetscInt));CHKERRQ(ierr); ierr = PetscMemcpy(&(AAUp[offset+1]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr); } /* allocate space for the triangular factor information */ upTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; /* Create the matrix description */ stat = hipsparseCreateMatDescr(&upTriFactor->descr);CHKERRCUDA(stat); stat = hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat); stat = hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUDA(stat); stat = hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUDA(stat); /* Create the solve analysis information */ stat = cusparseCreateSolveAnalysisInfo(&upTriFactor->solveInfo);CHKERRCUDA(stat); /* set the operation */ upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ upTriFactor->csrMat = new CsrMatrix; upTriFactor->csrMat->num_rows = n; upTriFactor->csrMat->num_cols = n; upTriFactor->csrMat->num_entries = nzUpper; upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1); upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1); upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper); upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper); upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper); upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper); /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, 
upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo);CHKERRCUDA(stat); /* assign the pointer. Is this really necessary? */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor; ierr = hipHostFree(AiUp);CHKERRCUDA(ierr); ierr = hipHostFree(AjUp);CHKERRCUDA(ierr); ierr = hipHostFree(AAUp);CHKERRCUDA(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU" static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A) { PetscErrorCode ierr; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; IS isrow = a->row,iscol = a->icol; PetscBool row_identity,col_identity; const PetscInt *r,*c; PetscInt n = A->rmap->n; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr); cusparseTriFactors->workVector = new THRUSTARRAY; cusparseTriFactors->workVector->resize(n); cusparseTriFactors->nnz=a->nz; A->valid_GPU_matrix = PETSC_CUDA_BOTH; /*lower triangular indices */ ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr); ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr); if (!row_identity) { cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->rpermIndices->assign(r, r+n); } ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr); /*upper triangular indices */ ierr = ISGetIndices(iscol,&c);CHKERRQ(ierr); ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr); if (!col_identity) { cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->cpermIndices->assign(c, c+n); } ierr = ISRestoreIndices(iscol,&c);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSeqAIJCUSPARSEBuildICCTriMatrices" static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; hipsparseStatus_t stat; PetscErrorCode ierr; PetscInt *AiUp, *AjUp; PetscScalar *AAUp; PetscScalar *AALo; PetscInt nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j; Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ*)A->data; const PetscInt *ai = b->i,*aj = b->j,*vj; const MatScalar *aa = b->a,*v; PetscFunctionBegin; if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) { try { /* Allocate Space for the upper triangular matrix */ ierr = hipHostMalloc((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(ierr); ierr = hipHostMalloc((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(ierr); ierr = hipHostMalloc((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(ierr); ierr = hipHostMalloc((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUDA(ierr); /* Fill the upper triangular matrix */ AiUp[0]=(PetscInt) 0; AiUp[n]=nzUpper; offset = 0; for (i=0; i<n; i++) { /* set the pointers */ v = aa + ai[i]; vj = aj + ai[i]; nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */ /* first, set the diagonal elements */ AjUp[offset] = 
(PetscInt) i; AAUp[offset] = (MatScalar)1.0/v[nz]; AiUp[i] = offset; AALo[offset] = (MatScalar)1.0/v[nz]; offset+=1; if (nz>0) { ierr = PetscMemcpy(&(AjUp[offset]), vj, nz*sizeof(PetscInt));CHKERRQ(ierr); ierr = PetscMemcpy(&(AAUp[offset]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr); for (j=offset; j<offset+nz; j++) { AAUp[j] = -AAUp[j]; AALo[j] = AAUp[j]/v[nz]; } offset+=nz; } } /* allocate space for the triangular factor information */ upTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; /* Create the matrix description */ stat = hipsparseCreateMatDescr(&upTriFactor->descr);CHKERRCUDA(stat); stat = hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); stat = hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat); stat = hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUDA(stat); stat = hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT);CHKERRCUDA(stat); /* Create the solve analysis information */ stat = cusparseCreateSolveAnalysisInfo(&upTriFactor->solveInfo);CHKERRCUDA(stat); /* set the operation */ upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ upTriFactor->csrMat = new CsrMatrix; upTriFactor->csrMat->num_rows = A->rmap->n; upTriFactor->csrMat->num_cols = A->cmap->n; upTriFactor->csrMat->num_entries = a->nz; upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); upTriFactor->csrMat->values = new THRUSTARRAY(a->nz); upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz); /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo);CHKERRCUDA(stat); /* assign the pointer. Is this really necessary? 
*/ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor; /* allocate space for the triangular factor information */ loTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; /* Create the matrix description */ stat = hipsparseCreateMatDescr(&loTriFactor->descr);CHKERRCUDA(stat); stat = hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); stat = hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat); stat = hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER);CHKERRCUDA(stat); stat = hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUDA(stat); /* Create the solve analysis information */ stat = cusparseCreateSolveAnalysisInfo(&loTriFactor->solveInfo);CHKERRCUDA(stat); /* set the operation */ loTriFactor->solveOp = HIPSPARSE_OPERATION_TRANSPOSE; /* set the matrix */ loTriFactor->csrMat = new CsrMatrix; loTriFactor->csrMat->num_rows = A->rmap->n; loTriFactor->csrMat->num_cols = A->cmap->n; loTriFactor->csrMat->num_entries = a->nz; loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); loTriFactor->csrMat->values = new THRUSTARRAY(a->nz); loTriFactor->csrMat->values->assign(AALo, AALo+a->nz); /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo);CHKERRCUDA(stat); /* assign the pointer. Is this really necessary? 
*/ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor; A->valid_GPU_matrix = PETSC_CUDA_BOTH; ierr = hipHostFree(AiUp);CHKERRCUDA(ierr); ierr = hipHostFree(AjUp);CHKERRCUDA(ierr); ierr = hipHostFree(AAUp);CHKERRCUDA(ierr); ierr = hipHostFree(AALo);CHKERRCUDA(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU" static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A) { PetscErrorCode ierr; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; IS ip = a->row; const PetscInt *rip; PetscBool perm_identity; PetscInt n = A->rmap->n; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr); cusparseTriFactors->workVector = new THRUSTARRAY; cusparseTriFactors->workVector->resize(n); cusparseTriFactors->nnz=(a->nz-n)*2 + n; /*lower triangular indices */ ierr = ISGetIndices(ip,&rip);CHKERRQ(ierr); ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr); if (!perm_identity) { cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->rpermIndices->assign(rip, rip+n); cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->cpermIndices->assign(rip, rip+n); } ierr = ISRestoreIndices(ip,&rip);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatLUFactorNumeric_SeqAIJCUSPARSE" static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info) { Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; IS isrow = b->row,iscol = b->col; PetscBool row_identity,col_identity; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr); /* determine which version of MatSolve needs to be used. */ ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr); ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr); if (row_identity && col_identity) { B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; } else { B->ops->solve = MatSolve_SeqAIJCUSPARSE; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; } /* get the triangular factors */ ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatCholeskyFactorNumeric_SeqAIJCUSPARSE" static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info) { Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; IS ip = b->row; PetscBool perm_identity; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr); /* determine which version of MatSolve needs to be used. 
*/ ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr); if (perm_identity) { B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; } else { B->ops->solve = MatSolve_SeqAIJCUSPARSE; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; } /* get the triangular factors */ ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSeqAIJCUSPARSEAnalyzeTransposeForSolve" static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; hipsparseStatus_t stat; hipsparseIndexBase_t indexBase; hipsparseMatrixType_t matrixType; hipsparseFillMode_t fillMode; hipsparseDiagType_t diagType; PetscFunctionBegin; /*********************************************/ /* Now the Transpose of the Lower Tri Factor */ /*********************************************/ /* allocate space for the transpose of the lower triangular factor */ loTriFactorT = new Mat_SeqAIJCUSPARSETriFactorStruct; /* set the matrix descriptors of the lower triangular factor */ matrixType = cusparseGetMatType(loTriFactor->descr); indexBase = cusparseGetMatIndexBase(loTriFactor->descr); fillMode = cusparseGetMatFillMode(loTriFactor->descr)==HIPSPARSE_FILL_MODE_UPPER ? HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER; diagType = cusparseGetMatDiagType(loTriFactor->descr); /* Create the matrix description */ stat = hipsparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUDA(stat); stat = hipsparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUDA(stat); stat = hipsparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUDA(stat); stat = hipsparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUDA(stat); stat = hipsparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUDA(stat); /* Create the solve analysis information */ stat = cusparseCreateSolveAnalysisInfo(&loTriFactorT->solveInfo);CHKERRCUDA(stat); /* set the operation */ loTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* allocate GPU space for the CSC of the lower triangular factor*/ loTriFactorT->csrMat = new CsrMatrix; loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_rows; loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_cols; loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries; loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactor->csrMat->num_rows+1); loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactor->csrMat->num_entries); loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactor->csrMat->num_entries); /* compute the transpose of the lower triangular factor, i.e. 
the CSC */ stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat); /* perform the solve analysis on the transposed matrix */ stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo);CHKERRCUDA(stat); /* assign the pointer. Is this really necessary? */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT; /*********************************************/ /* Now the Transpose of the Upper Tri Factor */ /*********************************************/ /* allocate space for the transpose of the upper triangular factor */ upTriFactorT = new Mat_SeqAIJCUSPARSETriFactorStruct; /* set the matrix descriptors of the upper triangular factor */ matrixType = cusparseGetMatType(upTriFactor->descr); indexBase = cusparseGetMatIndexBase(upTriFactor->descr); fillMode = cusparseGetMatFillMode(upTriFactor->descr)==HIPSPARSE_FILL_MODE_UPPER ? HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER; diagType = cusparseGetMatDiagType(upTriFactor->descr); /* Create the matrix description */ stat = hipsparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUDA(stat); stat = hipsparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUDA(stat); stat = hipsparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUDA(stat); stat = hipsparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUDA(stat); stat = hipsparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUDA(stat); /* Create the solve analysis information */ stat = cusparseCreateSolveAnalysisInfo(&upTriFactorT->solveInfo);CHKERRCUDA(stat); /* set the operation */ upTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* allocate GPU space for the CSC of the upper triangular factor*/ upTriFactorT->csrMat = new CsrMatrix; upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_rows; upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_cols; upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries; upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactor->csrMat->num_rows+1); upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactor->csrMat->num_entries); upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactor->csrMat->num_entries); /* compute the transpose of the upper triangular factor, i.e. 
the CSC */ stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat); /* perform the solve analysis on the transposed matrix */ stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo);CHKERRCUDA(stat); /* assign the pointer. Is this really necessary? */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSeqAIJCUSPARSEGenerateTransposeForMult" static PetscErrorCode MatSeqAIJCUSPARSEGenerateTransposeForMult(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; hipsparseStatus_t stat; hipsparseIndexBase_t indexBase; hipError_t err; PetscFunctionBegin; /* allocate space for the triangular factor information */ matstructT = new Mat_SeqAIJCUSPARSEMultStruct; stat = hipsparseCreateMatDescr(&matstructT->descr);CHKERRCUDA(stat); indexBase = cusparseGetMatIndexBase(matstruct->descr); stat = hipsparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUDA(stat); stat = hipsparseSetMatType(matstructT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUDA(stat); /* set alpha and beta */ err = hipMalloc((void **)&(matstructT->alpha),sizeof(PetscScalar));CHKERRCUDA(err); err = hipMemcpy(matstructT->alpha,&ALPHA,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); err = hipMalloc((void **)&(matstructT->beta),sizeof(PetscScalar));CHKERRCUDA(err); err = hipMemcpy(matstructT->beta,&BETA,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); stat = hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUDA(stat); if (cusparsestruct->format==MAT_CUSPARSE_CSR) { CsrMatrix *matrix = (CsrMatrix*)matstruct->mat; CsrMatrix *matrixT= new CsrMatrix; matrixT->num_rows = A->rmap->n; matrixT->num_cols = A->cmap->n; matrixT->num_entries = a->nz; matrixT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); matrixT->column_indices = new THRUSTINTARRAY32(a->nz); matrixT->values = new THRUSTARRAY(a->nz); /* compute the transpose of the upper triangular factor, i.e. 
the CSC */ indexBase = cusparseGetMatIndexBase(matstruct->descr); stat = cusparse_csr2csc(cusparsestruct->handle, matrix->num_rows, matrix->num_cols, matrix->num_entries, matrix->values->data().get(), matrix->row_offsets->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat); /* assign the pointer */ matstructT->mat = matrixT; } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) { #if TORCH_HIP_VERSION>=5000 /* First convert HYB to CSR */ CsrMatrix *temp= new CsrMatrix; temp->num_rows = A->rmap->n; temp->num_cols = A->cmap->n; temp->num_entries = a->nz; temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); temp->column_indices = new THRUSTINTARRAY32(a->nz); temp->values = new THRUSTARRAY(a->nz); stat = cusparse_hyb2csr(cusparsestruct->handle, matstruct->descr, (cusparseHybMat_t)matstruct->mat, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get());CHKERRCUDA(stat); /* Next, convert CSR to CSC (i.e. the matrix transpose) */ CsrMatrix *tempT= new CsrMatrix; tempT->num_rows = A->rmap->n; tempT->num_cols = A->cmap->n; tempT->num_entries = a->nz; tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); tempT->column_indices = new THRUSTINTARRAY32(a->nz); tempT->values = new THRUSTARRAY(a->nz); stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows, temp->num_cols, temp->num_entries, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get(), tempT->values->data().get(), tempT->column_indices->data().get(), tempT->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat); /* Last, convert CSC to HYB */ cusparseHybMat_t hybMat; stat = cusparseCreateHybMat(&hybMat);CHKERRCUDA(stat); cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ? 
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n, matstructT->descr, tempT->values->data().get(), tempT->row_offsets->data().get(), tempT->column_indices->data().get(), hybMat, 0, partition);CHKERRCUDA(stat); /* assign the pointer */ matstructT->mat = hybMat; /* delete temporaries */ if (tempT) { if (tempT->values) delete (THRUSTARRAY*) tempT->values; if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices; if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets; delete (CsrMatrix*) tempT; } if (temp) { if (temp->values) delete (THRUSTARRAY*) temp->values; if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices; if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets; delete (CsrMatrix*) temp; } #else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ELL (Ellpack) and HYB (Hybrid) storage format for the Matrix Transpose (in MatMultTranspose) require CUDA 5.0 or later."); #endif } /* assign the compressed row indices */ matstructT->cprowIndices = new THRUSTINTARRAY; /* assign the pointer */ ((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSolveTranspose_SeqAIJCUSPARSE" static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) { PetscInt n = xx->map->n; const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; hipsparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; PetscFunctionBegin; /* Analyze the matrix and create the transpose ... 
on the fly */ if (!loTriFactorT && !upTriFactorT) { ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; } /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); /* First, reorder with the row permutation */ thrust::copy(thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU+n, cusparseTriFactors->rpermIndices->end()), xGPU); /* First, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, &ALPHA, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, xarray, tempGPU->data().get());CHKERRCUDA(stat); /* Then, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, &ALPHA, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray);CHKERRCUDA(stat); /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ thrust::copy(thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(xGPU+n, cusparseTriFactors->cpermIndices->end()), tempGPU->begin()); /* Copy the temporary to the full solution. */ thrust::copy(tempGPU->begin(), tempGPU->end(), xGPU); /* restore */ ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUDA(ierr); ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering" static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; hipsparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; PetscFunctionBegin; /* Analyze the matrix and create the transpose ... 
on the fly */ if (!loTriFactorT && !upTriFactorT) { ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; } /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); /* First, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, &ALPHA, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, barray, tempGPU->data().get());CHKERRCUDA(stat); /* Then, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, &ALPHA, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray);CHKERRCUDA(stat); /* restore */ ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUDA(ierr); ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSolve_SeqAIJCUSPARSE" static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; hipsparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; VecType t; PetscBool flg; PetscFunctionBegin; ierr = VecGetType(bb,&t);CHKERRQ(ierr); ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr); if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed into MatSolve_SeqAIJCUSPARSE (Arg #2). Can only deal with %s\n.",t,VECSEQCUDA); ierr = VecGetType(xx,&t);CHKERRQ(ierr); ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr); if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed into MatSolve_SeqAIJCUSPARSE (Arg #3). 
Can only deal with %s\n.",t,VECSEQCUDA); /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); /* First, reorder with the row permutation */ thrust::copy(thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), xGPU); /* Next, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, &ALPHA, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, xarray, tempGPU->data().get());CHKERRCUDA(stat); /* Then, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, &ALPHA, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, tempGPU->data().get(), xarray);CHKERRCUDA(stat); /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ thrust::copy(thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->end()), tempGPU->begin()); /* Copy the temporary to the full solution. */ thrust::copy(tempGPU->begin(), tempGPU->end(), xGPU); ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUDA(ierr); ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSolve_SeqAIJCUSPARSE_NaturalOrdering" static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; hipsparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; PetscFunctionBegin; /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); /* First, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, &ALPHA, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, barray, tempGPU->data().get());CHKERRCUDA(stat); /* Next, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, &ALPHA, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, tempGPU->data().get(), xarray);CHKERRCUDA(stat); ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = 
VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUDA(ierr); ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSeqAIJCUSPARSECopyToGPU" static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscInt m = A->rmap->n,*ii,*ridx; PetscErrorCode ierr; hipsparseStatus_t stat; hipError_t err; PetscFunctionBegin; if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) { ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); Mat_SeqAIJCUSPARSEMultStruct_Destroy(&matstruct,cusparsestruct->format); try { cusparsestruct->nonzerorow=0; for (int j = 0; j<m; j++) cusparsestruct->nonzerorow += ((a->i[j+1]-a->i[j])>0); if (a->compressedrow.use) { m = a->compressedrow.nrows; ii = a->compressedrow.i; ridx = a->compressedrow.rindex; } else { /* Forcing compressed row on the GPU */ int k=0; ierr = PetscMalloc1(cusparsestruct->nonzerorow+1, &ii);CHKERRQ(ierr); ierr = PetscMalloc1(cusparsestruct->nonzerorow, &ridx);CHKERRQ(ierr); ii[0]=0; for (int j = 0; j<m; j++) { if ((a->i[j+1]-a->i[j])>0) { ii[k] = a->i[j]; ridx[k]= j; k++; } } ii[cusparsestruct->nonzerorow] = a->nz; m = cusparsestruct->nonzerorow; } /* allocate space for the triangular factor information */ matstruct = new Mat_SeqAIJCUSPARSEMultStruct; stat = hipsparseCreateMatDescr(&matstruct->descr);CHKERRCUDA(stat); stat = hipsparseSetMatIndexBase(matstruct->descr, HIPSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); stat = hipsparseSetMatType(matstruct->descr, HIPSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUDA(stat); err = hipMalloc((void **)&(matstruct->alpha),sizeof(PetscScalar));CHKERRCUDA(err); err = hipMemcpy(matstruct->alpha,&ALPHA,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); err = hipMalloc((void **)&(matstruct->beta),sizeof(PetscScalar));CHKERRCUDA(err); err = hipMemcpy(matstruct->beta,&BETA,sizeof(PetscScalar),hipMemcpyHostToDevice);CHKERRCUDA(err); stat = hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE);CHKERRCUDA(stat); /* Build a hybrid/ellpack matrix if this option is chosen for the storage */ if (cusparsestruct->format==MAT_CUSPARSE_CSR) { /* set the matrix */ CsrMatrix *matrix= new CsrMatrix; matrix->num_rows = m; matrix->num_cols = A->cmap->n; matrix->num_entries = a->nz; matrix->row_offsets = new THRUSTINTARRAY32(m+1); matrix->row_offsets->assign(ii, ii + m+1); matrix->column_indices = new THRUSTINTARRAY32(a->nz); matrix->column_indices->assign(a->j, a->j+a->nz); matrix->values = new THRUSTARRAY(a->nz); matrix->values->assign(a->a, a->a+a->nz); /* assign the pointer */ matstruct->mat = matrix; } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) { #if TORCH_HIP_VERSION>=4020 CsrMatrix *matrix= new CsrMatrix; matrix->num_rows = m; matrix->num_cols = A->cmap->n; matrix->num_entries = a->nz; matrix->row_offsets = new THRUSTINTARRAY32(m+1); matrix->row_offsets->assign(ii, ii + m+1); matrix->column_indices = new THRUSTINTARRAY32(a->nz); matrix->column_indices->assign(a->j, a->j+a->nz); matrix->values = new THRUSTARRAY(a->nz); matrix->values->assign(a->a, a->a+a->nz); cusparseHybMat_t hybMat; stat = cusparseCreateHybMat(&hybMat);CHKERRCUDA(stat); cusparseHybPartition_t partition = 
cusparsestruct->format==MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, matrix->num_rows, matrix->num_cols, matstruct->descr, matrix->values->data().get(), matrix->row_offsets->data().get(), matrix->column_indices->data().get(), hybMat, 0, partition);CHKERRCUDA(stat); /* assign the pointer */ matstruct->mat = hybMat; if (matrix) { if (matrix->values) delete (THRUSTARRAY*)matrix->values; if (matrix->column_indices) delete (THRUSTINTARRAY32*)matrix->column_indices; if (matrix->row_offsets) delete (THRUSTINTARRAY32*)matrix->row_offsets; delete (CsrMatrix*)matrix; } #endif } /* assign the compressed row indices */ matstruct->cprowIndices = new THRUSTINTARRAY(m); matstruct->cprowIndices->assign(ridx,ridx+m); /* assign the pointer */ cusparsestruct->mat = matstruct; if (!a->compressedrow.use) { ierr = PetscFree(ii);CHKERRQ(ierr); ierr = PetscFree(ridx);CHKERRQ(ierr); } cusparsestruct->workVector = new THRUSTARRAY; cusparsestruct->workVector->resize(m); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } ierr = WaitForGPU();CHKERRCUDA(ierr); A->valid_GPU_matrix = PETSC_CUDA_BOTH; ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatCreateVecs_SeqAIJCUSPARSE" static PetscErrorCode MatCreateVecs_SeqAIJCUSPARSE(Mat mat, Vec *right, Vec *left) { PetscErrorCode ierr; PetscInt rbs,cbs; PetscFunctionBegin; ierr = MatGetBlockSizes(mat,&rbs,&cbs);CHKERRQ(ierr); if (right) { ierr = VecCreate(PetscObjectComm((PetscObject)mat),right);CHKERRQ(ierr); ierr = VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);CHKERRQ(ierr); ierr = VecSetBlockSize(*right,cbs);CHKERRQ(ierr); ierr = VecSetType(*right,VECSEQCUDA);CHKERRQ(ierr); ierr = PetscLayoutReference(mat->cmap,&(*right)->map);CHKERRQ(ierr); } if (left) { ierr = VecCreate(PetscObjectComm((PetscObject)mat),left);CHKERRQ(ierr); ierr = VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);CHKERRQ(ierr); ierr = VecSetBlockSize(*left,rbs);CHKERRQ(ierr); ierr = VecSetType(*left,VECSEQCUDA);CHKERRQ(ierr); ierr = PetscLayoutReference(mat->rmap,&(*left)->map);CHKERRQ(ierr); } PetscFunctionReturn(0); } struct VecCUDAPlusEquals { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t); } }; #undef __FUNCT__ #define __FUNCT__ "MatMult_SeqAIJCUSPARSE" static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; const PetscScalar *xarray; PetscScalar *yarray; PetscErrorCode ierr; hipsparseStatus_t stat; PetscFunctionBegin; /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayWrite(yy,&yarray);CHKERRQ(ierr); if (cusparsestruct->format==MAT_CUSPARSE_CSR) { CsrMatrix *mat = (CsrMatrix*)matstruct->mat; stat = cusparse_csr_spmv(cusparsestruct->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, mat->num_rows, mat->num_cols, mat->num_entries, matstruct->alpha, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), xarray, matstruct->beta, yarray);CHKERRCUDA(stat); } else { #if TORCH_HIP_VERSION>=4020 
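/* ELL/HYB path: matstruct->mat holds an opaque cusparseHybMat_t built from the CSR data in MatSeqAIJCUSPARSECopyToGPU(), so the hybrid SpMV routine is called here instead of the CSR kernel. */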
cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat; stat = cusparse_hyb_spmv(cusparsestruct->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, matstruct->alpha, matstruct->descr, hybMat, xarray, matstruct->beta, yarray);CHKERRCUDA(stat); #endif } ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(yy,&yarray);CHKERRQ(ierr); if (!cusparsestruct->stream) { ierr = WaitForGPU();CHKERRCUDA(ierr); } ierr = PetscLogFlops(2.0*a->nz - cusparsestruct->nonzerorow);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatMultTranspose_SeqAIJCUSPARSE" static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; const PetscScalar *xarray; PetscScalar *yarray; PetscErrorCode ierr; hipsparseStatus_t stat; PetscFunctionBegin; /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ if (!matstructT) { ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr); matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; } ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayWrite(yy,&yarray);CHKERRQ(ierr); if (cusparsestruct->format==MAT_CUSPARSE_CSR) { CsrMatrix *mat = (CsrMatrix*)matstructT->mat; stat = cusparse_csr_spmv(cusparsestruct->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, mat->num_rows, mat->num_cols, mat->num_entries, matstructT->alpha, matstructT->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), xarray, matstructT->beta, yarray);CHKERRCUDA(stat); } else { #if TORCH_HIP_VERSION>=4020 cusparseHybMat_t hybMat = (cusparseHybMat_t)matstructT->mat; stat = cusparse_hyb_spmv(cusparsestruct->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, matstructT->alpha, matstructT->descr, hybMat, xarray, matstructT->beta, yarray);CHKERRCUDA(stat); #endif } ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(yy,&yarray);CHKERRQ(ierr); if (!cusparsestruct->stream) { ierr = WaitForGPU();CHKERRCUDA(ierr); } ierr = PetscLogFlops(2.0*a->nz - cusparsestruct->nonzerorow);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatMultAdd_SeqAIJCUSPARSE" static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; thrust::device_ptr<PetscScalar> zptr; const PetscScalar *xarray; PetscScalar *zarray; PetscErrorCode ierr; hipsparseStatus_t stat; PetscFunctionBegin; /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ try { ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr); zptr = thrust::device_pointer_cast(zarray); /* multiply add */ if (cusparsestruct->format==MAT_CUSPARSE_CSR) { CsrMatrix *mat = (CsrMatrix*)matstruct->mat; /* here we need to be careful to set the number of rows in the multiply to the number of compressed rows in the matrix ... 
which is equivalent to the size of the workVector */ stat = cusparse_csr_spmv(cusparsestruct->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, mat->num_rows, mat->num_cols, mat->num_entries, matstruct->alpha, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), xarray, matstruct->beta, cusparsestruct->workVector->data().get());CHKERRCUDA(stat); } else { #if TORCH_HIP_VERSION>=4020 cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat; if (cusparsestruct->workVector->size()) { stat = cusparse_hyb_spmv(cusparsestruct->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, matstruct->alpha, matstruct->descr, hybMat, xarray, matstruct->beta, cusparsestruct->workVector->data().get());CHKERRCUDA(stat); } #endif } /* scatter the data from the temporary into the full vector with a += operation */ thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + cusparsestruct->workVector->size(), VecCUDAPlusEquals()); ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } ierr = WaitForGPU();CHKERRCUDA(ierr); ierr = PetscLogFlops(2.0*a->nz);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatMultTransposeAdd_SeqAIJCUSPARSE" static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; thrust::device_ptr<PetscScalar> zptr; const PetscScalar *xarray; PetscScalar *zarray; PetscErrorCode ierr; hipsparseStatus_t stat; PetscFunctionBegin; /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ if (!matstructT) { ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr); matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; } try { ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr); zptr = thrust::device_pointer_cast(zarray); /* multiply add with matrix transpose */ if (cusparsestruct->format==MAT_CUSPARSE_CSR) { CsrMatrix *mat = (CsrMatrix*)matstructT->mat; /* here we need to be careful to set the number of rows in the multiply to the number of compressed rows in the matrix ... 
which is equivalent to the size of the workVector */ stat = cusparse_csr_spmv(cusparsestruct->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, mat->num_rows, mat->num_cols, mat->num_entries, matstructT->alpha, matstructT->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), xarray, matstructT->beta, cusparsestruct->workVector->data().get());CHKERRCUDA(stat); } else { #if TORCH_HIP_VERSION>=4020 cusparseHybMat_t hybMat = (cusparseHybMat_t)matstructT->mat; if (cusparsestruct->workVector->size()) { stat = cusparse_hyb_spmv(cusparsestruct->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, matstructT->alpha, matstructT->descr, hybMat, xarray, matstructT->beta, cusparsestruct->workVector->data().get());CHKERRCUDA(stat); } #endif } /* scatter the data from the temporary into the full vector with a += operation */ thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstructT->cprowIndices->begin()))), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstructT->cprowIndices->begin()))) + cusparsestruct->workVector->size(), VecCUDAPlusEquals()); ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } ierr = WaitForGPU();CHKERRCUDA(ierr); ierr = PetscLogFlops(2.0*a->nz);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatAssemblyEnd_SeqAIJCUSPARSE" static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr); if (A->factortype==MAT_FACTOR_NONE) { ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); } if (mode == MAT_FLUSH_ASSEMBLY) PetscFunctionReturn(0); A->ops->mult = MatMult_SeqAIJCUSPARSE; A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE; A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE; A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE; PetscFunctionReturn(0); } /* --------------------------------------------------------------------------------*/ #undef __FUNCT__ #define __FUNCT__ "MatCreateSeqAIJCUSPARSE" /*@ MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format (the default parallel PETSc format). This matrix will ultimately pushed down to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix assembly performance the user should preallocate the matrix storage by setting the parameter nz (or the array nnz). By setting these parameters accurately, performance during matrix assembly can be increased by more than a factor of 50. Collective on MPI_Comm Input Parameters: + comm - MPI communicator, set to PETSC_COMM_SELF . m - number of rows . n - number of columns . nz - number of nonzeros per row (same for all rows) - nnz - array containing the number of nonzeros in the various rows (possibly different for each row) or NULL Output Parameter: . A - the matrix It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(), MatXXXXSetPreallocation() paradgm instead of this routine directly. [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation] Notes: If nnz is given then nz is ignored The AIJ format (also called the Yale sparse matrix format or compressed row storage), is fully compatible with standard Fortran 77 storage. 
That is, the stored row and column indices can begin at either one (as in Fortran) or zero. See the users' manual for details. Specify the preallocated storage with either nz or nnz (not both). Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory allocation. For large problems you MUST preallocate memory or you will get TERRIBLE performance, see the users' manual chapter on matrices. By default, this format uses inodes (identical nodes) when possible, to improve numerical efficiency of matrix-vector products and solves. We search for consecutive rows with the same nonzero structure, thereby reusing matrix information to achieve increased efficiency. Level: intermediate .seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE @*/ PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate(comm,A);CHKERRQ(ierr); ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr); ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatDestroy_SeqAIJCUSPARSE" static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A) { PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype==MAT_FACTOR_NONE) { if (A->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) { ierr = Mat_SeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr); } } else { ierr = Mat_SeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr); } ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatCreate_SeqAIJCUSPARSE" PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B) { PetscErrorCode ierr; hipsparseStatus_t stat; hipsparseHandle_t handle=0; PetscFunctionBegin; ierr = MatCreate_SeqAIJ(B);CHKERRQ(ierr); if (B->factortype==MAT_FACTOR_NONE) { /* you cannot check the inode.use flag here since the matrix was just created. 
now build a GPU matrix data structure */ B->spptr = new Mat_SeqAIJCUSPARSE; ((Mat_SeqAIJCUSPARSE*)B->spptr)->mat = 0; ((Mat_SeqAIJCUSPARSE*)B->spptr)->matTranspose = 0; ((Mat_SeqAIJCUSPARSE*)B->spptr)->workVector = 0; ((Mat_SeqAIJCUSPARSE*)B->spptr)->format = MAT_CUSPARSE_CSR; ((Mat_SeqAIJCUSPARSE*)B->spptr)->stream = 0; ((Mat_SeqAIJCUSPARSE*)B->spptr)->handle = 0; stat = hipsparseCreate(&handle);CHKERRCUDA(stat); ((Mat_SeqAIJCUSPARSE*)B->spptr)->handle = handle; ((Mat_SeqAIJCUSPARSE*)B->spptr)->stream = 0; } else { /* NEXT, set the pointers to the triangular factors */ B->spptr = new Mat_SeqAIJCUSPARSETriFactors; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->loTriFactorPtr = 0; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->upTriFactorPtr = 0; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->loTriFactorPtrTranspose = 0; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->upTriFactorPtrTranspose = 0; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->rpermIndices = 0; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->cpermIndices = 0; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->workVector = 0; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->handle = 0; stat = hipsparseCreate(&handle);CHKERRCUDA(stat); ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->handle = handle; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->nnz = 0; } B->ops->assemblyend = MatAssemblyEnd_SeqAIJCUSPARSE; B->ops->destroy = MatDestroy_SeqAIJCUSPARSE; B->ops->getvecs = MatCreateVecs_SeqAIJCUSPARSE; B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE; B->ops->mult = MatMult_SeqAIJCUSPARSE; B->ops->multadd = MatMultAdd_SeqAIJCUSPARSE; B->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE; B->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE; ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); B->valid_GPU_matrix = PETSC_CUDA_UNALLOCATED; ierr = PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr); PetscFunctionReturn(0); } /*M MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices. A matrix type type whose data resides on Nvidia GPUs. These matrices can be in either CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later. All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library. Options Database Keys: + -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions() . -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). . -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). 
Level: beginner .seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation M*/ PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat,MatFactorType,Mat*); #undef __FUNCT__ #define __FUNCT__ "MatSolverPackageRegister_CUSPARSE" PETSC_EXTERN PetscErrorCode MatSolverPackageRegister_CUSPARSE(void) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "Mat_SeqAIJCUSPARSE_Destroy" static PetscErrorCode Mat_SeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct) { hipsparseStatus_t stat; hipsparseHandle_t handle; PetscFunctionBegin; if (*cusparsestruct) { Mat_SeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat,(*cusparsestruct)->format); Mat_SeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose,(*cusparsestruct)->format); delete (*cusparsestruct)->workVector; if (handle = (*cusparsestruct)->handle) { stat = hipsparseDestroy(handle);CHKERRCUDA(stat); } delete *cusparsestruct; *cusparsestruct = 0; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "CsrMatrix_Destroy" static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat) { PetscFunctionBegin; if (*mat) { delete (*mat)->values; delete (*mat)->column_indices; delete (*mat)->row_offsets; delete *mat; *mat = 0; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "Mat_SeqAIJCUSPARSETriFactorStruct_Destroy" static PetscErrorCode Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor) { hipsparseStatus_t stat; PetscErrorCode ierr; PetscFunctionBegin; if (*trifactor) { if ((*trifactor)->descr) { stat = hipsparseDestroyMatDescr((*trifactor)->descr);CHKERRCUDA(stat); } if ((*trifactor)->solveInfo) { stat = cusparseDestroySolveAnalysisInfo((*trifactor)->solveInfo);CHKERRCUDA(stat); } ierr = CsrMatrix_Destroy(&(*trifactor)->csrMat);CHKERRQ(ierr); delete *trifactor; *trifactor = 0; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "Mat_SeqAIJCUSPARSEMultStruct_Destroy" static PetscErrorCode Mat_SeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format) { CsrMatrix *mat; hipsparseStatus_t stat; hipError_t err; PetscFunctionBegin; if (*matstruct) { if ((*matstruct)->mat) { if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) { cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat; stat = cusparseDestroyHybMat(hybMat);CHKERRCUDA(stat); } else { mat = (CsrMatrix*)(*matstruct)->mat; CsrMatrix_Destroy(&mat); } } if ((*matstruct)->descr) { stat = hipsparseDestroyMatDescr((*matstruct)->descr);CHKERRCUDA(stat); } delete (*matstruct)->cprowIndices; if ((*matstruct)->alpha) { err=hipFree((*matstruct)->alpha);CHKERRCUDA(err); } if ((*matstruct)->beta) { err=hipFree((*matstruct)->beta);CHKERRCUDA(err); } delete *matstruct; *matstruct = 0; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ 
"Mat_SeqAIJCUSPARSETriFactors_Destroy" static PetscErrorCode Mat_SeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors) { hipsparseHandle_t handle; hipsparseStatus_t stat; PetscFunctionBegin; if (*trifactors) { Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->loTriFactorPtr); Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->upTriFactorPtr); Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose); Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose); delete (*trifactors)->rpermIndices; delete (*trifactors)->cpermIndices; delete (*trifactors)->workVector; if (handle = (*trifactors)->handle) { stat = hipsparseDestroy(handle);CHKERRCUDA(stat); } delete *trifactors; *trifactors = 0; } PetscFunctionReturn(0); }
aijcusparse.cu
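/*
   Usage sketch (illustrative only, not part of the PETSc source): the routines in this
   file implement the MATSEQAIJCUSPARSE matrix type.  A typical caller creates the matrix
   with MatCreateSeqAIJCUSPARSE(), assembles it on the host, and every subsequent
   MatMult()/MatSolve() then runs on the GPU through the kernels defined here.  The
   example function name below is hypothetical; it assumes a CUDA-enabled PETSc build and
   uses only standard PETSc calls (MatSetValues, MatAssemblyBegin/End, MatCreateVecs,
   MatMult).

     static PetscErrorCode ExampleSeqAIJCUSPARSEMult(void)
     {
       Mat            A;
       Vec            x,y;
       PetscInt       i,ncols,col[3],n = 8;
       PetscScalar    v[3];
       PetscErrorCode ierr;

       PetscFunctionBegin;
       ierr = MatCreateSeqAIJCUSPARSE(PETSC_COMM_SELF,n,n,3,NULL,&A);CHKERRQ(ierr);
       for (i=0; i<n; i++) {              1D Laplacian stencil, at most 3 nonzeros per row
         ncols = 0;
         if (i>0)   {col[ncols] = i-1; v[ncols++] = -1.0;}
         col[ncols] = i; v[ncols++] = 2.0;
         if (i<n-1) {col[ncols] = i+1; v[ncols++] = -1.0;}
         ierr = MatSetValues(A,1,&i,ncols,col,v,INSERT_VALUES);CHKERRQ(ierr);
       }
       ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
       ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);   copies the matrix to the GPU
       ierr = MatCreateVecs(A,&x,&y);CHKERRQ(ierr);                 VECSEQCUDA work vectors
       ierr = VecSet(x,1.0);CHKERRQ(ierr);
       ierr = MatMult(A,x,y);CHKERRQ(ierr);                         CSR SpMV on the device
       ierr = VecDestroy(&x);CHKERRQ(ierr);
       ierr = VecDestroy(&y);CHKERRQ(ierr);
       ierr = MatDestroy(&A);CHKERRQ(ierr);
       PetscFunctionReturn(0);
     }

   Factorizations (ILU/ICC/LU/Cholesky) are obtained through the MATSOLVERCUSPARSE package
   registered below, e.g. with the run-time option -pc_factor_mat_solver_package cusparse.
*/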
/* Defines the basic matrix operations for the AIJ (compressed row) matrix storage format using the CUSPARSE library, */ #define PETSC_SKIP_SPINLOCK #include <petscconf.h> #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ #include <../src/mat/impls/sbaij/seq/sbaij.h> #include <../src/vec/vec/impls/dvecimpl.h> #include <petsc/private/vecimpl.h> #undef VecType #include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h> const char *const MatCUSPARSEStorageFormats[] = {"CSR","ELL","HYB","MatCUSPARSEStorageFormat","MAT_CUSPARSE_",0}; static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,const MatFactorInfo*); static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat,Mat,IS,IS,const MatFactorInfo*); static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat,Mat,const MatFactorInfo*); static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat,Vec,Vec); static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat); static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec); static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat,Vec,Vec); static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat,Vec,Vec,Vec); static PetscErrorCode CsrMatrix_Destroy(CsrMatrix**); static PetscErrorCode Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct**); static PetscErrorCode Mat_SeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct**,MatCUSPARSEStorageFormat); static PetscErrorCode Mat_SeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors**); static PetscErrorCode Mat_SeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE**); #undef __FUNCT__ #define __FUNCT__ "MatCUSPARSESetStream" PetscErrorCode MatCUSPARSESetStream(Mat A,const cudaStream_t stream) { cusparseStatus_t stat; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; cusparsestruct->stream = stream; stat = cusparseSetStream(cusparsestruct->handle,cusparsestruct->stream);CHKERRCUDA(stat); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatCUSPARSESetHandle" PetscErrorCode MatCUSPARSESetHandle(Mat A,const cusparseHandle_t handle) { cusparseStatus_t stat; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; if (cusparsestruct->handle) { stat = cusparseDestroy(cusparsestruct->handle);CHKERRCUDA(stat); } cusparsestruct->handle = handle; stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUDA(stat); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatCUSPARSEClearHandle" PetscErrorCode MatCUSPARSEClearHandle(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; if (cusparsestruct->handle) cusparsestruct->handle = 0; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatFactorGetSolverPackage_seqaij_cusparse" PetscErrorCode MatFactorGetSolverPackage_seqaij_cusparse(Mat A,const MatSolverPackage *type) { 
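/* Query callback composed on factored matrices in MatGetFactor_seqaijcusparse_cusparse(); reports MATSOLVERCUSPARSE as the package that produced the factors. */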
PetscFunctionBegin; *type = MATSOLVERCUSPARSE; PetscFunctionReturn(0); } /*MC MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices on a single GPU of type, seqaijcusparse, aijcusparse, or seqaijcusp, aijcusp. Currently supported algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) results in poorer performance in the triangular solves. Full LU, and Cholesky decompositions can be solved through the CUSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these algorithms are not recommended. This class does NOT support direct solver operations. Level: beginner .seealso: PCFactorSetMatSolverPackage(), MatSolverPackage, MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation M*/ #undef __FUNCT__ #define __FUNCT__ "MatGetFactor_seqaijcusparse_cusparse" PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A,MatFactorType ftype,Mat *B) { PetscErrorCode ierr; PetscInt n = A->rmap->n; PetscFunctionBegin; ierr = MatCreate(PetscObjectComm((PetscObject)A),B);CHKERRQ(ierr); (*B)->factortype = ftype; ierr = MatSetSizes(*B,n,n,n,n);CHKERRQ(ierr); ierr = MatSetType(*B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) { ierr = MatSetBlockSizesFromMats(*B,A,A);CHKERRQ(ierr); (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE; } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) { (*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE; } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Factor type not supported for CUSPARSE Matrix Types"); ierr = MatSeqAIJSetPreallocation(*B,MAT_SKIP_ALLOCATION,NULL);CHKERRQ(ierr); ierr = PetscObjectComposeFunction((PetscObject)(*B),"MatFactorGetSolverPackage_C",MatFactorGetSolverPackage_seqaij_cusparse);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatCUSPARSESetFormat_SeqAIJCUSPARSE" PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; #if CUDA_VERSION>=4020 switch (op) { case MAT_CUSPARSE_MULT: cusparsestruct->format = format; break; case MAT_CUSPARSE_ALL: cusparsestruct->format = format; break; default: SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.",op); } #else if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ELL (Ellpack) and HYB (Hybrid) storage format require CUDA 4.2 or later."); #endif PetscFunctionReturn(0); } /*@ MatCUSPARSESetFormat - Sets the storage format of CUSPARSE matrices for a particular operation. Only the MatMult operation can use different GPU storage formats for MPIAIJCUSPARSE matrices. Not Collective Input Parameters: + A - Matrix of type SEQAIJCUSPARSE . op - MatCUSPARSEFormatOperation. SEQAIJCUSPARSE matrices support MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL. MPIAIJCUSPARSE matrices support MAT_CUSPARSE_MULT_DIAG, MAT_CUSPARSE_MULT_OFFDIAG, and MAT_CUSPARSE_ALL. 
- format - MatCUSPARSEStorageFormat (one of MAT_CUSPARSE_CSR, MAT_CUSPARSE_ELL, MAT_CUSPARSE_HYB. The latter two require CUDA 4.2) Output Parameter: Level: intermediate .seealso: MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation @*/ #undef __FUNCT__ #define __FUNCT__ "MatCUSPARSESetFormat" PetscErrorCode MatCUSPARSESetFormat(Mat A,MatCUSPARSEFormatOperation op,MatCUSPARSEStorageFormat format) { PetscErrorCode ierr; PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID,1); ierr = PetscTryMethod(A, "MatCUSPARSESetFormat_C",(Mat,MatCUSPARSEFormatOperation,MatCUSPARSEStorageFormat),(A,op,format));CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSetFromOptions_SeqAIJCUSPARSE" static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(PetscOptionItems *PetscOptionsObject,Mat A) { PetscErrorCode ierr; MatCUSPARSEStorageFormat format; PetscBool flg; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; PetscFunctionBegin; ierr = PetscOptionsHead(PetscOptionsObject,"SeqAIJCUSPARSE options");CHKERRQ(ierr); ierr = PetscObjectOptionsBegin((PetscObject)A); if (A->factortype==MAT_FACTOR_NONE) { ierr = PetscOptionsEnum("-mat_cusparse_mult_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) { ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_MULT,format);CHKERRQ(ierr); } } ierr = PetscOptionsEnum("-mat_cusparse_storage_format","sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve", "MatCUSPARSESetFormat",MatCUSPARSEStorageFormats,(PetscEnum)cusparsestruct->format,(PetscEnum*)&format,&flg);CHKERRQ(ierr); if (flg) { ierr = MatCUSPARSESetFormat(A,MAT_CUSPARSE_ALL,format);CHKERRQ(ierr); } ierr = PetscOptionsEnd();CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatILUFactorSymbolic_SeqAIJCUSPARSE" static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatILUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr); B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatLUFactorSymbolic_SeqAIJCUSPARSE" static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS isrow,IS iscol,const MatFactorInfo *info) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatLUFactorSymbolic_SeqAIJ(B,A,isrow,iscol,info);CHKERRQ(ierr); B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatICCFactorSymbolic_SeqAIJCUSPARSE" static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatICCFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr); B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatCholeskyFactorSymbolic_SeqAIJCUSPARSE" static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B,Mat A,IS perm,const MatFactorInfo *info) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCholeskyFactorSymbolic_SeqAIJ(B,A,perm,info);CHKERRQ(ierr); B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSeqAIJCUSPARSEBuildILULowerTriMatrix" static PetscErrorCode 
MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscInt n = A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; cusparseStatus_t stat; const PetscInt *ai = a->i,*aj = a->j,*vi; const MatScalar *aa = a->a,*v; PetscInt *AiLo, *AjLo; PetscScalar *AALo; PetscInt i,nz, nzLower, offset, rowOffset; PetscErrorCode ierr; PetscFunctionBegin; if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) { try { /* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */ nzLower=n+ai[n]-ai[1]; /* Allocate Space for the lower triangular matrix */ ierr = cudaMallocHost((void**) &AiLo, (n+1)*sizeof(PetscInt));CHKERRCUDA(ierr); ierr = cudaMallocHost((void**) &AjLo, nzLower*sizeof(PetscInt));CHKERRCUDA(ierr); ierr = cudaMallocHost((void**) &AALo, nzLower*sizeof(PetscScalar));CHKERRCUDA(ierr); /* Fill the lower triangular matrix */ AiLo[0] = (PetscInt) 0; AiLo[n] = nzLower; AjLo[0] = (PetscInt) 0; AALo[0] = (MatScalar) 1.0; v = aa; vi = aj; offset = 1; rowOffset= 1; for (i=1; i<n; i++) { nz = ai[i+1] - ai[i]; /* additional 1 for the term on the diagonal */ AiLo[i] = rowOffset; rowOffset += nz+1; ierr = PetscMemcpy(&(AjLo[offset]), vi, nz*sizeof(PetscInt));CHKERRQ(ierr); ierr = PetscMemcpy(&(AALo[offset]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr); offset += nz; AjLo[offset] = (PetscInt) i; AALo[offset] = (MatScalar) 1.0; offset += 1; v += nz; vi += nz; } /* allocate space for the triangular factor information */ loTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; /* Create the matrix description */ stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUDA(stat); stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat); stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_LOWER);CHKERRCUDA(stat); stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUDA(stat); /* Create the solve analysis information */ stat = cusparseCreateSolveAnalysisInfo(&loTriFactor->solveInfo);CHKERRCUDA(stat); /* set the operation */ loTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ loTriFactor->csrMat = new CsrMatrix; loTriFactor->csrMat->num_rows = n; loTriFactor->csrMat->num_cols = n; loTriFactor->csrMat->num_entries = nzLower; loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1); loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo+n+1); loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower); loTriFactor->csrMat->column_indices->assign(AjLo, AjLo+nzLower); loTriFactor->csrMat->values = new THRUSTARRAY(nzLower); loTriFactor->csrMat->values->assign(AALo, AALo+nzLower); /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo);CHKERRCUDA(stat); /* assign the pointer. Is this really necessary? 
*/ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor; ierr = cudaFreeHost(AiLo);CHKERRCUDA(ierr); ierr = cudaFreeHost(AjLo);CHKERRCUDA(ierr); ierr = cudaFreeHost(AALo);CHKERRCUDA(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSeqAIJCUSPARSEBuildILUUpperTriMatrix" static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscInt n = A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; cusparseStatus_t stat; const PetscInt *aj = a->j,*adiag = a->diag,*vi; const MatScalar *aa = a->a,*v; PetscInt *AiUp, *AjUp; PetscScalar *AAUp; PetscInt i,nz, nzUpper, offset; PetscErrorCode ierr; PetscFunctionBegin; if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) { try { /* next, figure out the number of nonzeros in the upper triangular matrix. */ nzUpper = adiag[0]-adiag[n]; /* Allocate Space for the upper triangular matrix */ ierr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(ierr); ierr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(ierr); ierr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(ierr); /* Fill the upper triangular matrix */ AiUp[0]=(PetscInt) 0; AiUp[n]=nzUpper; offset = nzUpper; for (i=n-1; i>=0; i--) { v = aa + adiag[i+1] + 1; vi = aj + adiag[i+1] + 1; /* number of elements NOT on the diagonal */ nz = adiag[i] - adiag[i+1]-1; /* decrement the offset */ offset -= (nz+1); /* first, set the diagonal elements */ AjUp[offset] = (PetscInt) i; AAUp[offset] = (MatScalar)1./v[nz]; AiUp[i] = AiUp[i+1] - (nz+1); ierr = PetscMemcpy(&(AjUp[offset+1]), vi, nz*sizeof(PetscInt));CHKERRQ(ierr); ierr = PetscMemcpy(&(AAUp[offset+1]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr); } /* allocate space for the triangular factor information */ upTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; /* Create the matrix description */ stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUDA(stat); stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat); stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUDA(stat); stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUDA(stat); /* Create the solve analysis information */ stat = cusparseCreateSolveAnalysisInfo(&upTriFactor->solveInfo);CHKERRCUDA(stat); /* set the operation */ upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ upTriFactor->csrMat = new CsrMatrix; upTriFactor->csrMat->num_rows = n; upTriFactor->csrMat->num_cols = n; upTriFactor->csrMat->num_entries = nzUpper; upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n+1); upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+n+1); upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper); upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+nzUpper); upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper); upTriFactor->csrMat->values->assign(AAUp, AAUp+nzUpper); /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, 
upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo);CHKERRCUDA(stat); /* assign the pointer. Is this really necessary? */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor; ierr = cudaFreeHost(AiUp);CHKERRCUDA(ierr); ierr = cudaFreeHost(AjUp);CHKERRCUDA(ierr); ierr = cudaFreeHost(AAUp);CHKERRCUDA(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU" static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A) { PetscErrorCode ierr; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; IS isrow = a->row,iscol = a->icol; PetscBool row_identity,col_identity; const PetscInt *r,*c; PetscInt n = A->rmap->n; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A);CHKERRQ(ierr); ierr = MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A);CHKERRQ(ierr); cusparseTriFactors->workVector = new THRUSTARRAY; cusparseTriFactors->workVector->resize(n); cusparseTriFactors->nnz=a->nz; A->valid_GPU_matrix = PETSC_CUDA_BOTH; /*lower triangular indices */ ierr = ISGetIndices(isrow,&r);CHKERRQ(ierr); ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr); if (!row_identity) { cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->rpermIndices->assign(r, r+n); } ierr = ISRestoreIndices(isrow,&r);CHKERRQ(ierr); /*upper triangular indices */ ierr = ISGetIndices(iscol,&c);CHKERRQ(ierr); ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr); if (!col_identity) { cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->cpermIndices->assign(c, c+n); } ierr = ISRestoreIndices(iscol,&c);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSeqAIJCUSPARSEBuildICCTriMatrices" static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; cusparseStatus_t stat; PetscErrorCode ierr; PetscInt *AiUp, *AjUp; PetscScalar *AAUp; PetscScalar *AALo; PetscInt nzUpper = a->nz,n = A->rmap->n,i,offset,nz,j; Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ*)A->data; const PetscInt *ai = b->i,*aj = b->j,*vj; const MatScalar *aa = b->a,*v; PetscFunctionBegin; if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) { try { /* Allocate Space for the upper triangular matrix */ ierr = cudaMallocHost((void**) &AiUp, (n+1)*sizeof(PetscInt));CHKERRCUDA(ierr); ierr = cudaMallocHost((void**) &AjUp, nzUpper*sizeof(PetscInt));CHKERRCUDA(ierr); ierr = cudaMallocHost((void**) &AAUp, nzUpper*sizeof(PetscScalar));CHKERRCUDA(ierr); ierr = cudaMallocHost((void**) &AALo, nzUpper*sizeof(PetscScalar));CHKERRCUDA(ierr); /* Fill the upper triangular matrix */ AiUp[0]=(PetscInt) 0; AiUp[n]=nzUpper; offset = 0; for (i=0; i<n; i++) { /* set the pointers */ v = aa + ai[i]; vj = aj + ai[i]; nz = ai[i+1] - ai[i] - 1; /* exclude diag[i] */ /* first, set the diagonal elements */ AjUp[offset] = 
(PetscInt) i; AAUp[offset] = (MatScalar)1.0/v[nz]; AiUp[i] = offset; AALo[offset] = (MatScalar)1.0/v[nz]; offset+=1; if (nz>0) { ierr = PetscMemcpy(&(AjUp[offset]), vj, nz*sizeof(PetscInt));CHKERRQ(ierr); ierr = PetscMemcpy(&(AAUp[offset]), v, nz*sizeof(PetscScalar));CHKERRQ(ierr); for (j=offset; j<offset+nz; j++) { AAUp[j] = -AAUp[j]; AALo[j] = AAUp[j]/v[nz]; } offset+=nz; } } /* allocate space for the triangular factor information */ upTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; /* Create the matrix description */ stat = cusparseCreateMatDescr(&upTriFactor->descr);CHKERRCUDA(stat); stat = cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); stat = cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat); stat = cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUDA(stat); stat = cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT);CHKERRCUDA(stat); /* Create the solve analysis information */ stat = cusparseCreateSolveAnalysisInfo(&upTriFactor->solveInfo);CHKERRCUDA(stat); /* set the operation */ upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ upTriFactor->csrMat = new CsrMatrix; upTriFactor->csrMat->num_rows = A->rmap->n; upTriFactor->csrMat->num_cols = A->cmap->n; upTriFactor->csrMat->num_entries = a->nz; upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); upTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); upTriFactor->csrMat->values = new THRUSTARRAY(a->nz); upTriFactor->csrMat->values->assign(AAUp, AAUp+a->nz); /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo);CHKERRCUDA(stat); /* assign the pointer. Is this really necessary? 
*/ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtr = upTriFactor; /* allocate space for the triangular factor information */ loTriFactor = new Mat_SeqAIJCUSPARSETriFactorStruct; /* Create the matrix description */ stat = cusparseCreateMatDescr(&loTriFactor->descr);CHKERRCUDA(stat); stat = cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); stat = cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR);CHKERRCUDA(stat); stat = cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_UPPER);CHKERRCUDA(stat); stat = cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT);CHKERRCUDA(stat); /* Create the solve analysis information */ stat = cusparseCreateSolveAnalysisInfo(&loTriFactor->solveInfo);CHKERRCUDA(stat); /* set the operation */ loTriFactor->solveOp = CUSPARSE_OPERATION_TRANSPOSE; /* set the matrix */ loTriFactor->csrMat = new CsrMatrix; loTriFactor->csrMat->num_rows = A->rmap->n; loTriFactor->csrMat->num_cols = A->cmap->n; loTriFactor->csrMat->num_entries = a->nz; loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp+A->rmap->n+1); loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); loTriFactor->csrMat->column_indices->assign(AjUp, AjUp+a->nz); loTriFactor->csrMat->values = new THRUSTARRAY(a->nz); loTriFactor->csrMat->values->assign(AALo, AALo+a->nz); /* perform the solve analysis */ stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo);CHKERRCUDA(stat); /* assign the pointer. Is this really necessary? 
*/ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtr = loTriFactor; A->valid_GPU_matrix = PETSC_CUDA_BOTH; ierr = cudaFreeHost(AiUp);CHKERRCUDA(ierr); ierr = cudaFreeHost(AjUp);CHKERRCUDA(ierr); ierr = cudaFreeHost(AAUp);CHKERRCUDA(ierr); ierr = cudaFreeHost(AALo);CHKERRCUDA(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU" static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A) { PetscErrorCode ierr; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; IS ip = a->row; const PetscInt *rip; PetscBool perm_identity; PetscInt n = A->rmap->n; PetscFunctionBegin; ierr = MatSeqAIJCUSPARSEBuildICCTriMatrices(A);CHKERRQ(ierr); cusparseTriFactors->workVector = new THRUSTARRAY; cusparseTriFactors->workVector->resize(n); cusparseTriFactors->nnz=(a->nz-n)*2 + n; /*lower triangular indices */ ierr = ISGetIndices(ip,&rip);CHKERRQ(ierr); ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr); if (!perm_identity) { cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->rpermIndices->assign(rip, rip+n); cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->cpermIndices->assign(rip, rip+n); } ierr = ISRestoreIndices(ip,&rip);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatLUFactorNumeric_SeqAIJCUSPARSE" static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info) { Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; IS isrow = b->row,iscol = b->col; PetscBool row_identity,col_identity; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatLUFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr); /* determine which version of MatSolve needs to be used. */ ierr = ISIdentity(isrow,&row_identity);CHKERRQ(ierr); ierr = ISIdentity(iscol,&col_identity);CHKERRQ(ierr); if (row_identity && col_identity) { B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; } else { B->ops->solve = MatSolve_SeqAIJCUSPARSE; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; } /* get the triangular factors */ ierr = MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatCholeskyFactorNumeric_SeqAIJCUSPARSE" static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B,Mat A,const MatFactorInfo *info) { Mat_SeqAIJ *b = (Mat_SeqAIJ*)B->data; IS ip = b->row; PetscBool perm_identity; PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCholeskyFactorNumeric_SeqAIJ(B,A,info);CHKERRQ(ierr); /* determine which version of MatSolve needs to be used. 
*/ ierr = ISIdentity(ip,&perm_identity);CHKERRQ(ierr); if (perm_identity) { B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; } else { B->ops->solve = MatSolve_SeqAIJCUSPARSE; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; } /* get the triangular factors */ ierr = MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSeqAIJCUSPARSEAnalyzeTransposeForSolve" static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; cusparseStatus_t stat; cusparseIndexBase_t indexBase; cusparseMatrixType_t matrixType; cusparseFillMode_t fillMode; cusparseDiagType_t diagType; PetscFunctionBegin; /*********************************************/ /* Now the Transpose of the Lower Tri Factor */ /*********************************************/ /* allocate space for the transpose of the lower triangular factor */ loTriFactorT = new Mat_SeqAIJCUSPARSETriFactorStruct; /* set the matrix descriptors of the lower triangular factor */ matrixType = cusparseGetMatType(loTriFactor->descr); indexBase = cusparseGetMatIndexBase(loTriFactor->descr); fillMode = cusparseGetMatFillMode(loTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ? CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER; diagType = cusparseGetMatDiagType(loTriFactor->descr); /* Create the matrix description */ stat = cusparseCreateMatDescr(&loTriFactorT->descr);CHKERRCUDA(stat); stat = cusparseSetMatIndexBase(loTriFactorT->descr, indexBase);CHKERRCUDA(stat); stat = cusparseSetMatType(loTriFactorT->descr, matrixType);CHKERRCUDA(stat); stat = cusparseSetMatFillMode(loTriFactorT->descr, fillMode);CHKERRCUDA(stat); stat = cusparseSetMatDiagType(loTriFactorT->descr, diagType);CHKERRCUDA(stat); /* Create the solve analysis information */ stat = cusparseCreateSolveAnalysisInfo(&loTriFactorT->solveInfo);CHKERRCUDA(stat); /* set the operation */ loTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* allocate GPU space for the CSC of the lower triangular factor*/ loTriFactorT->csrMat = new CsrMatrix; loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_rows; loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_cols; loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries; loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactor->csrMat->num_rows+1); loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactor->csrMat->num_entries); loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactor->csrMat->num_entries); /* compute the transpose of the lower triangular factor, i.e. 
the CSC */ stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat); /* perform the solve analysis on the transposed matrix */ stat = cusparse_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo);CHKERRCUDA(stat); /* assign the pointer. Is this really necessary? */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->loTriFactorPtrTranspose = loTriFactorT; /*********************************************/ /* Now the Transpose of the Upper Tri Factor */ /*********************************************/ /* allocate space for the transpose of the upper triangular factor */ upTriFactorT = new Mat_SeqAIJCUSPARSETriFactorStruct; /* set the matrix descriptors of the upper triangular factor */ matrixType = cusparseGetMatType(upTriFactor->descr); indexBase = cusparseGetMatIndexBase(upTriFactor->descr); fillMode = cusparseGetMatFillMode(upTriFactor->descr)==CUSPARSE_FILL_MODE_UPPER ? CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER; diagType = cusparseGetMatDiagType(upTriFactor->descr); /* Create the matrix description */ stat = cusparseCreateMatDescr(&upTriFactorT->descr);CHKERRCUDA(stat); stat = cusparseSetMatIndexBase(upTriFactorT->descr, indexBase);CHKERRCUDA(stat); stat = cusparseSetMatType(upTriFactorT->descr, matrixType);CHKERRCUDA(stat); stat = cusparseSetMatFillMode(upTriFactorT->descr, fillMode);CHKERRCUDA(stat); stat = cusparseSetMatDiagType(upTriFactorT->descr, diagType);CHKERRCUDA(stat); /* Create the solve analysis information */ stat = cusparseCreateSolveAnalysisInfo(&upTriFactorT->solveInfo);CHKERRCUDA(stat); /* set the operation */ upTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* allocate GPU space for the CSC of the upper triangular factor*/ upTriFactorT->csrMat = new CsrMatrix; upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_rows; upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_cols; upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries; upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactor->csrMat->num_rows+1); upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactor->csrMat->num_entries); upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactor->csrMat->num_entries); /* compute the transpose of the upper triangular factor, i.e. 
the CSC */ stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat); /* perform the solve analysis on the transposed matrix */ stat = cusparse_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo);CHKERRCUDA(stat); /* assign the pointer. Is this really necessary? */ ((Mat_SeqAIJCUSPARSETriFactors*)A->spptr)->upTriFactorPtrTranspose = upTriFactorT; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSeqAIJCUSPARSEGenerateTransposeForMult" static PetscErrorCode MatSeqAIJCUSPARSEGenerateTransposeForMult(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; cusparseStatus_t stat; cusparseIndexBase_t indexBase; cudaError_t err; PetscFunctionBegin; /* allocate space for the triangular factor information */ matstructT = new Mat_SeqAIJCUSPARSEMultStruct; stat = cusparseCreateMatDescr(&matstructT->descr);CHKERRCUDA(stat); indexBase = cusparseGetMatIndexBase(matstruct->descr); stat = cusparseSetMatIndexBase(matstructT->descr, indexBase);CHKERRCUDA(stat); stat = cusparseSetMatType(matstructT->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUDA(stat); /* set alpha and beta */ err = cudaMalloc((void **)&(matstructT->alpha),sizeof(PetscScalar));CHKERRCUDA(err); err = cudaMemcpy(matstructT->alpha,&ALPHA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); err = cudaMalloc((void **)&(matstructT->beta),sizeof(PetscScalar));CHKERRCUDA(err); err = cudaMemcpy(matstructT->beta,&BETA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUDA(stat); if (cusparsestruct->format==MAT_CUSPARSE_CSR) { CsrMatrix *matrix = (CsrMatrix*)matstruct->mat; CsrMatrix *matrixT= new CsrMatrix; matrixT->num_rows = A->rmap->n; matrixT->num_cols = A->cmap->n; matrixT->num_entries = a->nz; matrixT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); matrixT->column_indices = new THRUSTINTARRAY32(a->nz); matrixT->values = new THRUSTARRAY(a->nz); /* compute the transpose of the upper triangular factor, i.e. 
the CSC */ indexBase = cusparseGetMatIndexBase(matstruct->descr); stat = cusparse_csr2csc(cusparsestruct->handle, matrix->num_rows, matrix->num_cols, matrix->num_entries, matrix->values->data().get(), matrix->row_offsets->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat); /* assign the pointer */ matstructT->mat = matrixT; } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) { #if CUDA_VERSION>=5000 /* First convert HYB to CSR */ CsrMatrix *temp= new CsrMatrix; temp->num_rows = A->rmap->n; temp->num_cols = A->cmap->n; temp->num_entries = a->nz; temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); temp->column_indices = new THRUSTINTARRAY32(a->nz); temp->values = new THRUSTARRAY(a->nz); stat = cusparse_hyb2csr(cusparsestruct->handle, matstruct->descr, (cusparseHybMat_t)matstruct->mat, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get());CHKERRCUDA(stat); /* Next, convert CSR to CSC (i.e. the matrix transpose) */ CsrMatrix *tempT= new CsrMatrix; tempT->num_rows = A->rmap->n; tempT->num_cols = A->cmap->n; tempT->num_entries = a->nz; tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n+1); tempT->column_indices = new THRUSTINTARRAY32(a->nz); tempT->values = new THRUSTARRAY(a->nz); stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows, temp->num_cols, temp->num_entries, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get(), tempT->values->data().get(), tempT->column_indices->data().get(), tempT->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase);CHKERRCUDA(stat); /* Last, convert CSC to HYB */ cusparseHybMat_t hybMat; stat = cusparseCreateHybMat(&hybMat);CHKERRCUDA(stat); cusparseHybPartition_t partition = cusparsestruct->format==MAT_CUSPARSE_ELL ? 
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n, matstructT->descr, tempT->values->data().get(), tempT->row_offsets->data().get(), tempT->column_indices->data().get(), hybMat, 0, partition);CHKERRCUDA(stat); /* assign the pointer */ matstructT->mat = hybMat; /* delete temporaries */ if (tempT) { if (tempT->values) delete (THRUSTARRAY*) tempT->values; if (tempT->column_indices) delete (THRUSTINTARRAY32*) tempT->column_indices; if (tempT->row_offsets) delete (THRUSTINTARRAY32*) tempT->row_offsets; delete (CsrMatrix*) tempT; } if (temp) { if (temp->values) delete (THRUSTARRAY*) temp->values; if (temp->column_indices) delete (THRUSTINTARRAY32*) temp->column_indices; if (temp->row_offsets) delete (THRUSTINTARRAY32*) temp->row_offsets; delete (CsrMatrix*) temp; } #else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"ELL (Ellpack) and HYB (Hybrid) storage format for the Matrix Transpose (in MatMultTranspose) require CUDA 5.0 or later."); #endif } /* assign the compressed row indices */ matstructT->cprowIndices = new THRUSTINTARRAY; /* assign the pointer */ ((Mat_SeqAIJCUSPARSE*)A->spptr)->matTranspose = matstructT; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSolveTranspose_SeqAIJCUSPARSE" static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) { PetscInt n = xx->map->n; const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; cusparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; PetscFunctionBegin; /* Analyze the matrix and create the transpose ... 
on the fly */ if (!loTriFactorT && !upTriFactorT) { ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; } /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); /* First, reorder with the row permutation */ thrust::copy(thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU+n, cusparseTriFactors->rpermIndices->end()), xGPU); /* First, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, &ALPHA, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, xarray, tempGPU->data().get());CHKERRCUDA(stat); /* Then, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, &ALPHA, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray);CHKERRCUDA(stat); /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ thrust::copy(thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(xGPU+n, cusparseTriFactors->cpermIndices->end()), tempGPU->begin()); /* Copy the temporary to the full solution. */ thrust::copy(tempGPU->begin(), tempGPU->end(), xGPU); /* restore */ ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUDA(ierr); ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering" static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; cusparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; PetscFunctionBegin; /* Analyze the matrix and create the transpose ... 
on the fly */ if (!loTriFactorT && !upTriFactorT) { ierr = MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A);CHKERRQ(ierr); loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtrTranspose; upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtrTranspose; } /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); /* First, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, &ALPHA, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, barray, tempGPU->data().get());CHKERRCUDA(stat); /* Then, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, &ALPHA, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray);CHKERRCUDA(stat); /* restore */ ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUDA(ierr); ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSolve_SeqAIJCUSPARSE" static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; cusparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; VecType t; PetscBool flg; PetscFunctionBegin; ierr = VecGetType(bb,&t);CHKERRQ(ierr); ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr); if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed into MatSolve_SeqAIJCUSPARSE (Arg #2). Can only deal with %s\n.",t,VECSEQCUDA); ierr = VecGetType(xx,&t);CHKERRQ(ierr); ierr = PetscStrcmp(t,VECSEQCUDA,&flg);CHKERRQ(ierr); if (!flg) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Vector of type %s passed into MatSolve_SeqAIJCUSPARSE (Arg #3). 
Can only deal with %s\n.",t,VECSEQCUDA); /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); /* First, reorder with the row permutation */ thrust::copy(thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), xGPU); /* Next, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, &ALPHA, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, xarray, tempGPU->data().get());CHKERRCUDA(stat); /* Then, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, &ALPHA, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, tempGPU->data().get(), xarray);CHKERRCUDA(stat); /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ thrust::copy(thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->end()), tempGPU->begin()); /* Copy the temporary to the full solution. */ thrust::copy(tempGPU->begin(), tempGPU->end(), xGPU); ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUDA(ierr); ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSolve_SeqAIJCUSPARSE_NaturalOrdering" static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A,Vec bb,Vec xx) { const PetscScalar *barray; PetscScalar *xarray; cusparseStatus_t stat; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors*)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct*)cusparseTriFactors->upTriFactorPtr; THRUSTARRAY *tempGPU = (THRUSTARRAY*)cusparseTriFactors->workVector; PetscErrorCode ierr; PetscFunctionBegin; /* Get the GPU pointers */ ierr = VecCUDAGetArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(bb,&barray);CHKERRQ(ierr); /* First, solve L */ stat = cusparse_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, &ALPHA, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, barray, tempGPU->data().get());CHKERRCUDA(stat); /* Next, solve U */ stat = cusparse_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, &ALPHA, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, tempGPU->data().get(), xarray);CHKERRCUDA(stat); ierr = VecCUDARestoreArrayRead(bb,&barray);CHKERRQ(ierr); ierr = 
VecCUDARestoreArrayWrite(xx,&xarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUDA(ierr); ierr = PetscLogFlops(2.0*cusparseTriFactors->nnz - A->cmap->n);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatSeqAIJCUSPARSECopyToGPU" static PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; PetscInt m = A->rmap->n,*ii,*ridx; PetscErrorCode ierr; cusparseStatus_t stat; cudaError_t err; PetscFunctionBegin; if (A->valid_GPU_matrix == PETSC_CUDA_UNALLOCATED || A->valid_GPU_matrix == PETSC_CUDA_CPU) { ierr = PetscLogEventBegin(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); Mat_SeqAIJCUSPARSEMultStruct_Destroy(&matstruct,cusparsestruct->format); try { cusparsestruct->nonzerorow=0; for (int j = 0; j<m; j++) cusparsestruct->nonzerorow += ((a->i[j+1]-a->i[j])>0); if (a->compressedrow.use) { m = a->compressedrow.nrows; ii = a->compressedrow.i; ridx = a->compressedrow.rindex; } else { /* Forcing compressed row on the GPU */ int k=0; ierr = PetscMalloc1(cusparsestruct->nonzerorow+1, &ii);CHKERRQ(ierr); ierr = PetscMalloc1(cusparsestruct->nonzerorow, &ridx);CHKERRQ(ierr); ii[0]=0; for (int j = 0; j<m; j++) { if ((a->i[j+1]-a->i[j])>0) { ii[k] = a->i[j]; ridx[k]= j; k++; } } ii[cusparsestruct->nonzerorow] = a->nz; m = cusparsestruct->nonzerorow; } /* allocate space for the triangular factor information */ matstruct = new Mat_SeqAIJCUSPARSEMultStruct; stat = cusparseCreateMatDescr(&matstruct->descr);CHKERRCUDA(stat); stat = cusparseSetMatIndexBase(matstruct->descr, CUSPARSE_INDEX_BASE_ZERO);CHKERRCUDA(stat); stat = cusparseSetMatType(matstruct->descr, CUSPARSE_MATRIX_TYPE_GENERAL);CHKERRCUDA(stat); err = cudaMalloc((void **)&(matstruct->alpha),sizeof(PetscScalar));CHKERRCUDA(err); err = cudaMemcpy(matstruct->alpha,&ALPHA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); err = cudaMalloc((void **)&(matstruct->beta),sizeof(PetscScalar));CHKERRCUDA(err); err = cudaMemcpy(matstruct->beta,&BETA,sizeof(PetscScalar),cudaMemcpyHostToDevice);CHKERRCUDA(err); stat = cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE);CHKERRCUDA(stat); /* Build a hybrid/ellpack matrix if this option is chosen for the storage */ if (cusparsestruct->format==MAT_CUSPARSE_CSR) { /* set the matrix */ CsrMatrix *matrix= new CsrMatrix; matrix->num_rows = m; matrix->num_cols = A->cmap->n; matrix->num_entries = a->nz; matrix->row_offsets = new THRUSTINTARRAY32(m+1); matrix->row_offsets->assign(ii, ii + m+1); matrix->column_indices = new THRUSTINTARRAY32(a->nz); matrix->column_indices->assign(a->j, a->j+a->nz); matrix->values = new THRUSTARRAY(a->nz); matrix->values->assign(a->a, a->a+a->nz); /* assign the pointer */ matstruct->mat = matrix; } else if (cusparsestruct->format==MAT_CUSPARSE_ELL || cusparsestruct->format==MAT_CUSPARSE_HYB) { #if CUDA_VERSION>=4020 CsrMatrix *matrix= new CsrMatrix; matrix->num_rows = m; matrix->num_cols = A->cmap->n; matrix->num_entries = a->nz; matrix->row_offsets = new THRUSTINTARRAY32(m+1); matrix->row_offsets->assign(ii, ii + m+1); matrix->column_indices = new THRUSTINTARRAY32(a->nz); matrix->column_indices->assign(a->j, a->j+a->nz); matrix->values = new THRUSTARRAY(a->nz); matrix->values->assign(a->a, a->a+a->nz); cusparseHybMat_t hybMat; stat = cusparseCreateHybMat(&hybMat);CHKERRCUDA(stat); cusparseHybPartition_t partition = 
cusparsestruct->format==MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, matrix->num_rows, matrix->num_cols, matstruct->descr, matrix->values->data().get(), matrix->row_offsets->data().get(), matrix->column_indices->data().get(), hybMat, 0, partition);CHKERRCUDA(stat); /* assign the pointer */ matstruct->mat = hybMat; if (matrix) { if (matrix->values) delete (THRUSTARRAY*)matrix->values; if (matrix->column_indices) delete (THRUSTINTARRAY32*)matrix->column_indices; if (matrix->row_offsets) delete (THRUSTINTARRAY32*)matrix->row_offsets; delete (CsrMatrix*)matrix; } #endif } /* assign the compressed row indices */ matstruct->cprowIndices = new THRUSTINTARRAY(m); matstruct->cprowIndices->assign(ridx,ridx+m); /* assign the pointer */ cusparsestruct->mat = matstruct; if (!a->compressedrow.use) { ierr = PetscFree(ii);CHKERRQ(ierr); ierr = PetscFree(ridx);CHKERRQ(ierr); } cusparsestruct->workVector = new THRUSTARRAY; cusparsestruct->workVector->resize(m); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } ierr = WaitForGPU();CHKERRCUDA(ierr); A->valid_GPU_matrix = PETSC_CUDA_BOTH; ierr = PetscLogEventEnd(MAT_CUSPARSECopyToGPU,A,0,0,0);CHKERRQ(ierr); } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatCreateVecs_SeqAIJCUSPARSE" static PetscErrorCode MatCreateVecs_SeqAIJCUSPARSE(Mat mat, Vec *right, Vec *left) { PetscErrorCode ierr; PetscInt rbs,cbs; PetscFunctionBegin; ierr = MatGetBlockSizes(mat,&rbs,&cbs);CHKERRQ(ierr); if (right) { ierr = VecCreate(PetscObjectComm((PetscObject)mat),right);CHKERRQ(ierr); ierr = VecSetSizes(*right,mat->cmap->n,PETSC_DETERMINE);CHKERRQ(ierr); ierr = VecSetBlockSize(*right,cbs);CHKERRQ(ierr); ierr = VecSetType(*right,VECSEQCUDA);CHKERRQ(ierr); ierr = PetscLayoutReference(mat->cmap,&(*right)->map);CHKERRQ(ierr); } if (left) { ierr = VecCreate(PetscObjectComm((PetscObject)mat),left);CHKERRQ(ierr); ierr = VecSetSizes(*left,mat->rmap->n,PETSC_DETERMINE);CHKERRQ(ierr); ierr = VecSetBlockSize(*left,rbs);CHKERRQ(ierr); ierr = VecSetType(*left,VECSEQCUDA);CHKERRQ(ierr); ierr = PetscLayoutReference(mat->rmap,&(*left)->map);CHKERRQ(ierr); } PetscFunctionReturn(0); } struct VecCUDAPlusEquals { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t); } }; #undef __FUNCT__ #define __FUNCT__ "MatMult_SeqAIJCUSPARSE" static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; const PetscScalar *xarray; PetscScalar *yarray; PetscErrorCode ierr; cusparseStatus_t stat; PetscFunctionBegin; /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayWrite(yy,&yarray);CHKERRQ(ierr); if (cusparsestruct->format==MAT_CUSPARSE_CSR) { CsrMatrix *mat = (CsrMatrix*)matstruct->mat; stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, mat->num_rows, mat->num_cols, mat->num_entries, matstruct->alpha, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), xarray, matstruct->beta, yarray);CHKERRCUDA(stat); } else { #if CUDA_VERSION>=4020 
cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat; stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, matstruct->alpha, matstruct->descr, hybMat, xarray, matstruct->beta, yarray);CHKERRCUDA(stat); #endif } ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(yy,&yarray);CHKERRQ(ierr); if (!cusparsestruct->stream) { ierr = WaitForGPU();CHKERRCUDA(ierr); } ierr = PetscLogFlops(2.0*a->nz - cusparsestruct->nonzerorow);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatMultTranspose_SeqAIJCUSPARSE" static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; const PetscScalar *xarray; PetscScalar *yarray; PetscErrorCode ierr; cusparseStatus_t stat; PetscFunctionBegin; /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ if (!matstructT) { ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr); matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; } ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayWrite(yy,&yarray);CHKERRQ(ierr); if (cusparsestruct->format==MAT_CUSPARSE_CSR) { CsrMatrix *mat = (CsrMatrix*)matstructT->mat; stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, mat->num_rows, mat->num_cols, mat->num_entries, matstructT->alpha, matstructT->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), xarray, matstructT->beta, yarray);CHKERRCUDA(stat); } else { #if CUDA_VERSION>=4020 cusparseHybMat_t hybMat = (cusparseHybMat_t)matstructT->mat; stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, matstructT->alpha, matstructT->descr, hybMat, xarray, matstructT->beta, yarray);CHKERRCUDA(stat); #endif } ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(yy,&yarray);CHKERRQ(ierr); if (!cusparsestruct->stream) { ierr = WaitForGPU();CHKERRCUDA(ierr); } ierr = PetscLogFlops(2.0*a->nz - cusparsestruct->nonzerorow);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatMultAdd_SeqAIJCUSPARSE" static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->mat; thrust::device_ptr<PetscScalar> zptr; const PetscScalar *xarray; PetscScalar *zarray; PetscErrorCode ierr; cusparseStatus_t stat; PetscFunctionBegin; /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ try { ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr); zptr = thrust::device_pointer_cast(zarray); /* multiply add */ if (cusparsestruct->format==MAT_CUSPARSE_CSR) { CsrMatrix *mat = (CsrMatrix*)matstruct->mat; /* here we need to be careful to set the number of rows in the multiply to the number of compressed rows in the matrix ... 
which is equivalent to the size of the workVector */ stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, mat->num_rows, mat->num_cols, mat->num_entries, matstruct->alpha, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), xarray, matstruct->beta, cusparsestruct->workVector->data().get());CHKERRCUDA(stat); } else { #if CUDA_VERSION>=4020 cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat; if (cusparsestruct->workVector->size()) { stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, matstruct->alpha, matstruct->descr, hybMat, xarray, matstruct->beta, cusparsestruct->workVector->data().get());CHKERRCUDA(stat); } #endif } /* scatter the data from the temporary into the full vector with a += operation */ thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + cusparsestruct->workVector->size(), VecCUDAPlusEquals()); ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } ierr = WaitForGPU();CHKERRCUDA(ierr); ierr = PetscLogFlops(2.0*a->nz);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatMultTransposeAdd_SeqAIJCUSPARSE" static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A,Vec xx,Vec yy,Vec zz) { Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE*)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; thrust::device_ptr<PetscScalar> zptr; const PetscScalar *xarray; PetscScalar *zarray; PetscErrorCode ierr; cusparseStatus_t stat; PetscFunctionBegin; /* The line below should not be necessary as it has been moved to MatAssemblyEnd_SeqAIJCUSPARSE ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); */ if (!matstructT) { ierr = MatSeqAIJCUSPARSEGenerateTransposeForMult(A);CHKERRQ(ierr); matstructT = (Mat_SeqAIJCUSPARSEMultStruct*)cusparsestruct->matTranspose; } try { ierr = VecCopy_SeqCUDA(yy,zz);CHKERRQ(ierr); ierr = VecCUDAGetArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDAGetArrayWrite(zz,&zarray);CHKERRQ(ierr); zptr = thrust::device_pointer_cast(zarray); /* multiply add with matrix transpose */ if (cusparsestruct->format==MAT_CUSPARSE_CSR) { CsrMatrix *mat = (CsrMatrix*)matstructT->mat; /* here we need to be careful to set the number of rows in the multiply to the number of compressed rows in the matrix ... 
which is equivalent to the size of the workVector */ stat = cusparse_csr_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, mat->num_rows, mat->num_cols, mat->num_entries, matstructT->alpha, matstructT->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), xarray, matstructT->beta, cusparsestruct->workVector->data().get());CHKERRCUDA(stat); } else { #if CUDA_VERSION>=4020 cusparseHybMat_t hybMat = (cusparseHybMat_t)matstructT->mat; if (cusparsestruct->workVector->size()) { stat = cusparse_hyb_spmv(cusparsestruct->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, matstructT->alpha, matstructT->descr, hybMat, xarray, matstructT->beta, cusparsestruct->workVector->data().get());CHKERRCUDA(stat); } #endif } /* scatter the data from the temporary into the full vector with a += operation */ thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstructT->cprowIndices->begin()))), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstructT->cprowIndices->begin()))) + cusparsestruct->workVector->size(), VecCUDAPlusEquals()); ierr = VecCUDARestoreArrayRead(xx,&xarray);CHKERRQ(ierr); ierr = VecCUDARestoreArrayWrite(zz,&zarray);CHKERRQ(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSPARSE error: %s", ex); } ierr = WaitForGPU();CHKERRCUDA(ierr); ierr = PetscLogFlops(2.0*a->nz);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatAssemblyEnd_SeqAIJCUSPARSE" static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A,MatAssemblyType mode) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr); if (A->factortype==MAT_FACTOR_NONE) { ierr = MatSeqAIJCUSPARSECopyToGPU(A);CHKERRQ(ierr); } if (mode == MAT_FLUSH_ASSEMBLY) PetscFunctionReturn(0); A->ops->mult = MatMult_SeqAIJCUSPARSE; A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE; A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE; A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE; PetscFunctionReturn(0); } /* --------------------------------------------------------------------------------*/ #undef __FUNCT__ #define __FUNCT__ "MatCreateSeqAIJCUSPARSE" /*@ MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in AIJ (compressed row) format (the default parallel PETSc format). This matrix will ultimately be pushed down to NVidia GPUs and use the CUSPARSE library for calculations. For good matrix assembly performance the user should preallocate the matrix storage by setting the parameter nz (or the array nnz). By setting these parameters accurately, performance during matrix assembly can be increased by more than a factor of 50. Collective on MPI_Comm Input Parameters: + comm - MPI communicator, set to PETSC_COMM_SELF . m - number of rows . n - number of columns . nz - number of nonzeros per row (same for all rows) - nnz - array containing the number of nonzeros in the various rows (possibly different for each row) or NULL Output Parameter: . A - the matrix It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(), MatXXXXSetPreallocation() paradigm instead of this routine directly. [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation] Notes: If nnz is given then nz is ignored The AIJ format (also called the Yale sparse matrix format or compressed row storage), is fully compatible with standard Fortran 77 storage. 
That is, the stored row and column indices can begin at either one (as in Fortran) or zero. See the users' manual for details. Specify the preallocated storage with either nz or nnz (not both). Set nz=PETSC_DEFAULT and nnz=NULL for PETSc to control dynamic memory allocation. For large problems you MUST preallocate memory or you will get TERRIBLE performance, see the users' manual chapter on matrices. By default, this format uses inodes (identical nodes) when possible, to improve numerical efficiency of matrix-vector products and solves. We search for consecutive rows with the same nonzero structure, thereby reusing matrix information to achieve increased efficiency. Level: intermediate .seealso: MatCreate(), MatCreateAIJ(), MatSetValues(), MatSeqAIJSetColumnIndices(), MatCreateSeqAIJWithArrays(), MatCreateAIJ(), MATSEQAIJCUSPARSE, MATAIJCUSPARSE @*/ PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatCreate(comm,A);CHKERRQ(ierr); ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr); ierr = MatSetType(*A,MATSEQAIJCUSPARSE);CHKERRQ(ierr); ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,(PetscInt*)nnz);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatDestroy_SeqAIJCUSPARSE" static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A) { PetscErrorCode ierr; PetscFunctionBegin; if (A->factortype==MAT_FACTOR_NONE) { if (A->valid_GPU_matrix != PETSC_CUDA_UNALLOCATED) { ierr = Mat_SeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE**)&A->spptr);CHKERRQ(ierr); } } else { ierr = Mat_SeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors**)&A->spptr);CHKERRQ(ierr); } ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "MatCreate_SeqAIJCUSPARSE" PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B) { PetscErrorCode ierr; cusparseStatus_t stat; cusparseHandle_t handle=0; PetscFunctionBegin; ierr = MatCreate_SeqAIJ(B);CHKERRQ(ierr); if (B->factortype==MAT_FACTOR_NONE) { /* you cannot check the inode.use flag here since the matrix was just created. 
now build a GPU matrix data structure */ B->spptr = new Mat_SeqAIJCUSPARSE; ((Mat_SeqAIJCUSPARSE*)B->spptr)->mat = 0; ((Mat_SeqAIJCUSPARSE*)B->spptr)->matTranspose = 0; ((Mat_SeqAIJCUSPARSE*)B->spptr)->workVector = 0; ((Mat_SeqAIJCUSPARSE*)B->spptr)->format = MAT_CUSPARSE_CSR; ((Mat_SeqAIJCUSPARSE*)B->spptr)->stream = 0; ((Mat_SeqAIJCUSPARSE*)B->spptr)->handle = 0; stat = cusparseCreate(&handle);CHKERRCUDA(stat); ((Mat_SeqAIJCUSPARSE*)B->spptr)->handle = handle; ((Mat_SeqAIJCUSPARSE*)B->spptr)->stream = 0; } else { /* NEXT, set the pointers to the triangular factors */ B->spptr = new Mat_SeqAIJCUSPARSETriFactors; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->loTriFactorPtr = 0; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->upTriFactorPtr = 0; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->loTriFactorPtrTranspose = 0; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->upTriFactorPtrTranspose = 0; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->rpermIndices = 0; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->cpermIndices = 0; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->workVector = 0; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->handle = 0; stat = cusparseCreate(&handle);CHKERRCUDA(stat); ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->handle = handle; ((Mat_SeqAIJCUSPARSETriFactors*)B->spptr)->nnz = 0; } B->ops->assemblyend = MatAssemblyEnd_SeqAIJCUSPARSE; B->ops->destroy = MatDestroy_SeqAIJCUSPARSE; B->ops->getvecs = MatCreateVecs_SeqAIJCUSPARSE; B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE; B->ops->mult = MatMult_SeqAIJCUSPARSE; B->ops->multadd = MatMultAdd_SeqAIJCUSPARSE; B->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE; B->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE; ierr = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCUSPARSE);CHKERRQ(ierr); B->valid_GPU_matrix = PETSC_CUDA_UNALLOCATED; ierr = PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_SeqAIJCUSPARSE);CHKERRQ(ierr); PetscFunctionReturn(0); } /*M MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices. A matrix type whose data resides on Nvidia GPUs. These matrices can be in either CSR, ELL, or Hybrid format. The ELL and HYB formats require CUDA 4.2 or later. All matrix calculations are performed on Nvidia GPUs using the CUSPARSE library. Options Database Keys: + -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to MatSetFromOptions() . -mat_cusparse_storage_format csr - sets the storage format of matrices (for MatMult and factors in MatSolve) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). . -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for MatMult) during a call to MatSetFromOptions(). Other options include ell (ellpack) or hyb (hybrid). 
Level: beginner .seealso: MatCreateSeqAIJCUSPARSE(), MATAIJCUSPARSE, MatCreateAIJCUSPARSE(), MatCUSPARSESetFormat(), MatCUSPARSEStorageFormat, MatCUSPARSEFormatOperation M*/ PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat,MatFactorType,Mat*); #undef __FUNCT__ #define __FUNCT__ "MatSolverPackageRegister_CUSPARSE" PETSC_EXTERN PetscErrorCode MatSolverPackageRegister_CUSPARSE(void) { PetscErrorCode ierr; PetscFunctionBegin; ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_LU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_CHOLESKY,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ILU,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); ierr = MatSolverPackageRegister(MATSOLVERCUSPARSE,MATSEQAIJCUSPARSE,MAT_FACTOR_ICC,MatGetFactor_seqaijcusparse_cusparse);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "Mat_SeqAIJCUSPARSE_Destroy" static PetscErrorCode Mat_SeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct) { cusparseStatus_t stat; cusparseHandle_t handle; PetscFunctionBegin; if (*cusparsestruct) { Mat_SeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat,(*cusparsestruct)->format); Mat_SeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose,(*cusparsestruct)->format); delete (*cusparsestruct)->workVector; if (handle = (*cusparsestruct)->handle) { stat = cusparseDestroy(handle);CHKERRCUDA(stat); } delete *cusparsestruct; *cusparsestruct = 0; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "CsrMatrix_Destroy" static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat) { PetscFunctionBegin; if (*mat) { delete (*mat)->values; delete (*mat)->column_indices; delete (*mat)->row_offsets; delete *mat; *mat = 0; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "Mat_SeqAIJCUSPARSETriFactorStruct_Destroy" static PetscErrorCode Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor) { cusparseStatus_t stat; PetscErrorCode ierr; PetscFunctionBegin; if (*trifactor) { if ((*trifactor)->descr) { stat = cusparseDestroyMatDescr((*trifactor)->descr);CHKERRCUDA(stat); } if ((*trifactor)->solveInfo) { stat = cusparseDestroySolveAnalysisInfo((*trifactor)->solveInfo);CHKERRCUDA(stat); } ierr = CsrMatrix_Destroy(&(*trifactor)->csrMat);CHKERRQ(ierr); delete *trifactor; *trifactor = 0; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "Mat_SeqAIJCUSPARSEMultStruct_Destroy" static PetscErrorCode Mat_SeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct,MatCUSPARSEStorageFormat format) { CsrMatrix *mat; cusparseStatus_t stat; cudaError_t err; PetscFunctionBegin; if (*matstruct) { if ((*matstruct)->mat) { if (format==MAT_CUSPARSE_ELL || format==MAT_CUSPARSE_HYB) { cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat; stat = cusparseDestroyHybMat(hybMat);CHKERRCUDA(stat); } else { mat = (CsrMatrix*)(*matstruct)->mat; CsrMatrix_Destroy(&mat); } } if ((*matstruct)->descr) { stat = cusparseDestroyMatDescr((*matstruct)->descr);CHKERRCUDA(stat); } delete (*matstruct)->cprowIndices; if ((*matstruct)->alpha) { err=cudaFree((*matstruct)->alpha);CHKERRCUDA(err); } if ((*matstruct)->beta) { err=cudaFree((*matstruct)->beta);CHKERRCUDA(err); } delete *matstruct; *matstruct = 0; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ 
"Mat_SeqAIJCUSPARSETriFactors_Destroy" static PetscErrorCode Mat_SeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors** trifactors) { cusparseHandle_t handle; cusparseStatus_t stat; PetscFunctionBegin; if (*trifactors) { Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->loTriFactorPtr); Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->upTriFactorPtr); Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->loTriFactorPtrTranspose); Mat_SeqAIJCUSPARSETriFactorStruct_Destroy(&(*trifactors)->upTriFactorPtrTranspose); delete (*trifactors)->rpermIndices; delete (*trifactors)->cpermIndices; delete (*trifactors)->workVector; if (handle = (*trifactors)->handle) { stat = cusparseDestroy(handle);CHKERRCUDA(stat); } delete *trifactors; *trifactors = 0; } PetscFunctionReturn(0); }
54717eaa51d1b5a0fba226574ca7ec6b9fd0f227.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion

//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.

//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.

//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.

//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.

//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:

//I = .299f * R + .587f * G + .114f * B

//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.

//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.

#include "utils.h"

__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  //TODO
  //Fill in the kernel to convert from color to greyscale
  //the mapping from components of a uchar4 to RGBA is:
  // .x -> R ; .y -> G ; .z -> B ; .w -> A
  //
  //The output (greyImage) at each pixel should be the result of
  //applying the formula: output = .299f * R + .587f * G + .114f * B;
  //Note: We will be ignoring the alpha channel for this conversion

  //First create a mapping from the 2D block and grid locations
  //to an absolute 2D location in the image, then use that to
  //calculate a 1D offset
  int idx = threadIdx.x + blockDim.x * blockIdx.x;
  greyImage[idx] = .299f * rgbaImage[idx].x + .587f * rgbaImage[idx].y + .114f * rgbaImage[idx].z;
}

void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  //You must fill in the correct sizes for the blockSize and gridSize
  //currently only one block with one thread is being launched
  const dim3 blockSize(numCols);  //TODO
  const dim3 gridSize(numRows);   //TODO
  hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);

  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
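/*
   Illustrative sketch, not part of the original homework solution: the launch above uses
   one block per image row with numCols threads, which only works while numCols stays
   within the per-block thread limit (1024 on current hardware).  A more general pattern
   maps a 2D grid of fixed-size blocks over the image and bounds-checks each thread.  The
   kernel and launcher names below are hypothetical; they reuse this file's includes and
   are kept behind #if 0 so the original solution is unchanged.
*/
#if 0
__global__ void rgba_to_greyscale_2d(const uchar4* const rgbaImage,
                                     unsigned char* const greyImage,
                                     int numRows, int numCols)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  if (col >= numCols || row >= numRows) return;   // guard the ragged edges of the image

  const int idx = row * numCols + col;            // row-major 1D offset
  const uchar4 px = rgbaImage[idx];
  greyImage[idx] = .299f * px.x + .587f * px.y + .114f * px.z;
}

void rgba_to_greyscale_2d_launch(const uchar4* const d_rgbaImage,
                                 unsigned char* const d_greyImage,
                                 size_t numRows, size_t numCols)
{
  const dim3 blockSize(16, 16);   // 256 threads per block, well under the limit
  const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
                      (numRows + blockSize.y - 1) / blockSize.y);
  hipLaunchKernelGGL(( rgba_to_greyscale_2d), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
#endif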
54717eaa51d1b5a0fba226574ca7ec6b9fd0f227.cu
// Homework 1
// Color to Greyscale Conversion

//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.

//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.

//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.

//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.

//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:

//I = .299f * R + .587f * G + .114f * B

//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.

//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.

#include "utils.h"

__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  //TODO
  //Fill in the kernel to convert from color to greyscale
  //the mapping from components of a uchar4 to RGBA is:
  // .x -> R ; .y -> G ; .z -> B ; .w -> A
  //
  //The output (greyImage) at each pixel should be the result of
  //applying the formula: output = .299f * R + .587f * G + .114f * B;
  //Note: We will be ignoring the alpha channel for this conversion

  //First create a mapping from the 2D block and grid locations
  //to an absolute 2D location in the image, then use that to
  //calculate a 1D offset
  int idx = threadIdx.x + blockDim.x * blockIdx.x;
  greyImage[idx] = .299f * rgbaImage[idx].x + .587f * rgbaImage[idx].y + .114f * rgbaImage[idx].z;
}

void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  //You must fill in the correct sizes for the blockSize and gridSize
  //currently only one block with one thread is being launched
  const dim3 blockSize(numCols);  //TODO
  const dim3 gridSize(numRows);   //TODO
  rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);

  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
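The filled-in TODOs above cover every pixel only because blockSize is set to numCols and gridSize to numRows, so the 1D index threadIdx.x + blockDim.x * blockIdx.x enumerates numRows * numCols threads; that launch fails once numCols exceeds the per-block thread limit (1024 on current GPUs). As a hedged alternative, not part of the homework files, the sketch below uses the 2D block/grid mapping the comments describe plus a bounds check, so a fixed tile size works for any image; the rgba_to_greyscale_2d name and the 16x16 tile are illustrative choices.

// Hypothetical variant of the kernel above (compile as CUDA; uchar4 comes from
// the runtime headers nvcc pulls in). One thread per pixel via a 2D mapping.
__global__
void rgba_to_greyscale_2d(const uchar4* const rgbaImage,
                          unsigned char* const greyImage,
                          int numRows, int numCols)
{
  const int col = blockIdx.x * blockDim.x + threadIdx.x;  // x -> image column
  const int row = blockIdx.y * blockDim.y + threadIdx.y;  // y -> image row
  if (col >= numCols || row >= numRows) return;           // guard partial tiles at the edges

  const int idx = row * numCols + col;                    // 1D offset into the image
  const uchar4 px = rgbaImage[idx];
  greyImage[idx] = .299f * px.x + .587f * px.y + .114f * px.z;
}

// Launch with a fixed 16x16 tile and a grid rounded up to cover the image:
//   const dim3 blockSize(16, 16);
//   const dim3 gridSize((numCols + 15) / 16, (numRows + 15) / 16);
//   rgba_to_greyscale_2d<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);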
0d964c79d4f42d06688fd01c052b9ecfdbb67630.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathPairwise.cu" #else THC_API void THCTensor_(add)(THCState *state, THCTensor *self_, THCTensor *src_, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); if (self_ == src_) { if (!THC_pointwiseApply1(state, self_, TensorAddConstantOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); if (!THC_pointwiseApply2(state, self_, src_, TensorAddConstantOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(sub)(THCState *state, THCTensor *self_, THCTensor *src_, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); if (self_ == src_) { if (!THC_pointwiseApply1(state, self_, TensorSubConstantOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); if (!THC_pointwiseApply2(state, self_, src_, TensorSubConstantOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(mul)(THCState *state, THCTensor *self_, THCTensor *src_, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); if (self_ == src_) { if (!THC_pointwiseApply1(state, self_, TensorMulConstantOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); if (!THC_pointwiseApply2(state, self_, src_, TensorMulConstantOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(div)(THCState* state, THCTensor *self_, THCTensor *src_, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); THArgCheck(value != ScalarConvert<int, real>::to(0), 3, "divide by zero"); if (self_ == src_) { if (!THC_pointwiseApply1(state, self_, TensorDivConstantOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); if (!THC_pointwiseApply2(state, self_, src_, TensorDivConstantOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(fmod)(THCState *state, THCTensor *self_, THCTensor *src_, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); if (self_ == src_) { if (!THC_pointwiseApply1(state, self_, TensorFmodOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); if (!THC_pointwiseApply2(state, self_, src_, TensorFmodOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } THC_API void THCTensor_(remainder)(THCState *state, THCTensor *self_, THCTensor *src_, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); if (self_ == src_) { if (!THC_pointwiseApply1(state, self_, TensorRemainderOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); if (!THC_pointwiseApply2(state, self_, src_, TensorRemainderOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } void THCTensor_(tril)(THCState *state, THCTensor *self_, THCTensor *src_, long k) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); THArgCheck(src_->nDimension == 2, 1, "expected a matrix"); THCTensor *src = src_; if (self_ == src_) src = THCTensor_(newContiguous)(state, src_); long stride0 = 
src->stride[0]; long stride1 = src->stride[1]; real *start = THCTensor_(data)(state, src) + src->storageOffset; TensorTriOp<real, 0> op(start, stride0, stride1, k); if (self_ == src_) { if (!THC_pointwiseApply1(state, src, op)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, op)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } if (self_ == src_) THCTensor_(freeCopyTo)(state, src, src_); THCudaCheck(hipGetLastError()); } void THCTensor_(triu)(THCState *state, THCTensor *self_, THCTensor *src_, long k) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); THArgCheck(src_->nDimension == 2, 1, "expected a matrix"); THCTensor *src = src_; if (self_ == src_) src = THCTensor_(newContiguous)(state, src_); long stride0 = src->stride[0]; long stride1 = src->stride[1]; real *start = THCTensor_(data)(state, src) + src->storageOffset; TensorTriOp<real, 1> op(start, stride0, stride1, k); if (self_ == src_) { if (!THC_pointwiseApply1(state, src, op)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, op)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } if (self_ == src_) THCTensor_(freeCopyTo)(state, src, src_); THCudaCheck(hipGetLastError()); } THC_API int THCTensor_(equal)(THCState *state, THCTensor *self_, THCTensor *src_) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); if (!THCTensor_(isSameSizeAs(state, self_, src_))) { return 0; } // This is not as efficient as TH, but the basic idea: create a buffer that stores // 1 if the two tensors are equal at a position, otherwise 0. If the minimum value // in this buffer is 1, the two tensors are equal, otherwise they are not THLongStorage *size = THCTensor_(newSizeOf)(state, self_); THCudaByteTensor *buf = THCudaByteTensor_newWithSize(state, size, NULL); if (!THC_pointwiseApply3(state, buf, self_, src_, TensorEQOp<real, unsigned char>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } unsigned char min = THCudaByteTensor_minall(state, buf); THLongStorage_free(size); THCudaByteTensor_free(state, buf); return min != 0; } #endif
0d964c79d4f42d06688fd01c052b9ecfdbb67630.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathPairwise.cu" #else THC_API void THCTensor_(add)(THCState *state, THCTensor *self_, THCTensor *src_, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); if (self_ == src_) { if (!THC_pointwiseApply1(state, self_, TensorAddConstantOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); if (!THC_pointwiseApply2(state, self_, src_, TensorAddConstantOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(sub)(THCState *state, THCTensor *self_, THCTensor *src_, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); if (self_ == src_) { if (!THC_pointwiseApply1(state, self_, TensorSubConstantOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); if (!THC_pointwiseApply2(state, self_, src_, TensorSubConstantOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(mul)(THCState *state, THCTensor *self_, THCTensor *src_, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); if (self_ == src_) { if (!THC_pointwiseApply1(state, self_, TensorMulConstantOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); if (!THC_pointwiseApply2(state, self_, src_, TensorMulConstantOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(div)(THCState* state, THCTensor *self_, THCTensor *src_, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); THArgCheck(value != ScalarConvert<int, real>::to(0), 3, "divide by zero"); if (self_ == src_) { if (!THC_pointwiseApply1(state, self_, TensorDivConstantOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); if (!THC_pointwiseApply2(state, self_, src_, TensorDivConstantOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(fmod)(THCState *state, THCTensor *self_, THCTensor *src_, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); if (self_ == src_) { if (!THC_pointwiseApply1(state, self_, TensorFmodOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); if (!THC_pointwiseApply2(state, self_, src_, TensorFmodOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } THC_API void THCTensor_(remainder)(THCState *state, THCTensor *self_, THCTensor *src_, real value) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); if (self_ == src_) { if (!THC_pointwiseApply1(state, self_, TensorRemainderOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src_); if (!THC_pointwiseApply2(state, self_, src_, TensorRemainderOp<real>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(tril)(THCState *state, THCTensor *self_, THCTensor *src_, long k) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); THArgCheck(src_->nDimension == 2, 1, "expected a matrix"); THCTensor *src = src_; if (self_ == src_) src = THCTensor_(newContiguous)(state, src_); long stride0 = src->stride[0]; long stride1 = src->stride[1]; real 
*start = THCTensor_(data)(state, src) + src->storageOffset; TensorTriOp<real, 0> op(start, stride0, stride1, k); if (self_ == src_) { if (!THC_pointwiseApply1(state, src, op)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, op)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } if (self_ == src_) THCTensor_(freeCopyTo)(state, src, src_); THCudaCheck(cudaGetLastError()); } void THCTensor_(triu)(THCState *state, THCTensor *self_, THCTensor *src_, long k) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); THArgCheck(src_->nDimension == 2, 1, "expected a matrix"); THCTensor *src = src_; if (self_ == src_) src = THCTensor_(newContiguous)(state, src_); long stride0 = src->stride[0]; long stride1 = src->stride[1]; real *start = THCTensor_(data)(state, src) + src->storageOffset; TensorTriOp<real, 1> op(start, stride0, stride1, k); if (self_ == src_) { if (!THC_pointwiseApply1(state, src, op)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2(state, self_, src, op)) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } if (self_ == src_) THCTensor_(freeCopyTo)(state, src, src_); THCudaCheck(cudaGetLastError()); } THC_API int THCTensor_(equal)(THCState *state, THCTensor *self_, THCTensor *src_) { THAssert(THCTensor_(checkGPU)(state, 2, self_, src_)); if (!THCTensor_(isSameSizeAs(state, self_, src_))) { return 0; } // This is not as efficient as TH, but the basic idea: create a buffer that stores // 1 if the two tensors are equal at a position, otherwise 0. If the minimum value // in this buffer is 1, the two tensors are equal, otherwise they are not THLongStorage *size = THCTensor_(newSizeOf)(state, self_); THCudaByteTensor *buf = THCudaByteTensor_newWithSize(state, size, NULL); if (!THC_pointwiseApply3(state, buf, self_, src_, TensorEQOp<real, unsigned char>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } unsigned char min = THCudaByteTensor_minall(state, buf); THLongStorage_free(size); THCudaByteTensor_free(state, buf); return min != 0; } #endif
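THCTensor_(equal) above spells out its strategy in a comment: fill a byte buffer with 1 where the two tensors match and 0 where they differ, then declare the tensors equal iff the minimum over that buffer is 1. The sketch below restates the same reduce-by-minimum idea on two device vectors, but swaps THC_pointwiseApply3/TensorEQOp/THCudaByteTensor_minall for plain Thrust calls; the device_vectors_equal name and the float element type are illustrative, and it assumes compilation with nvcc or hipcc.

// Standalone Thrust sketch of the "equality buffer + min-reduce" pattern;
// not THC's API.
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>

bool device_vectors_equal(const thrust::device_vector<float>& a,
                          const thrust::device_vector<float>& b)
{
  if (a.size() != b.size()) return false;   // mirrors the isSameSizeAs early-out above
  if (a.empty()) return true;

  // 1 where elements match, 0 where they differ
  thrust::device_vector<unsigned char> buf(a.size());
  thrust::transform(a.begin(), a.end(), b.begin(), buf.begin(),
                    thrust::equal_to<float>());

  // the vectors are equal iff the minimum over the buffer is 1
  unsigned char min_val =
      thrust::reduce(buf.begin(), buf.end(), (unsigned char)1,
                     thrust::minimum<unsigned char>());
  return min_val != 0;
}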
7ea1c02c33ccbccd912c18ee69d379ca3544ea3a.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_view.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/lists/lists_column_view.hpp> #include <cudf/strings/convert/convert_datetime.hpp> #include <cudf/structs/struct_view.hpp> #include <cudf/structs/structs_column_view.hpp> #include <cudf/table/row_operators.cuh> #include <cudf/table/table_device_view.cuh> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/detail/column_utilities.hpp> #include <jit/type.hpp> #include <rmm/exec_policy.hpp> #include <thrust/equal.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/logical.h> #include <numeric> #include <sstream> #include "cudf/detail/utilities/vector_factories.hpp" #include "rmm/cuda_stream_view.hpp" namespace cudf { namespace test { namespace { template <bool check_exact_equality> struct column_property_comparator { bool types_equivalent(cudf::data_type const& lhs, cudf::data_type const& rhs) { return is_fixed_point(lhs) ? lhs.id() == rhs.id() : lhs == rhs; } void compare_common(cudf::column_view const& lhs, cudf::column_view const& rhs) { if (check_exact_equality) { EXPECT_EQ(lhs.type(), rhs.type()); } else { EXPECT_TRUE(types_equivalent(lhs.type(), rhs.type())); } EXPECT_EQ(lhs.size(), rhs.size()); if (lhs.size() > 0 && check_exact_equality) { EXPECT_EQ(lhs.nullable(), rhs.nullable()); } EXPECT_EQ(lhs.null_count(), rhs.null_count()); // equivalent, but not exactly equal columns can have a different number of children if their // sizes are both 0. Specifically, empty string columns may or may not have children. 
if (check_exact_equality || (lhs.size() > 0 && lhs.null_count() < lhs.size())) { EXPECT_EQ(lhs.num_children(), rhs.num_children()); } } template <typename T, std::enable_if_t<!std::is_same<T, cudf::list_view>::value>* = nullptr> void operator()(cudf::column_view const& lhs, cudf::column_view const& rhs) { compare_common(lhs, rhs); } template <typename T, std::enable_if_t<std::is_same<T, cudf::list_view>::value>* = nullptr> void operator()(cudf::column_view const& lhs, cudf::column_view const& rhs) { compare_common(lhs, rhs); cudf::lists_column_view lhs_l(lhs); cudf::lists_column_view rhs_l(rhs); // recurse cudf::type_dispatcher(lhs_l.child().type(), column_property_comparator<check_exact_equality>{}, lhs_l.get_sliced_child(rmm::cuda_stream_default), rhs_l.get_sliced_child(rmm::cuda_stream_default)); } }; class corresponding_rows_unequal { public: corresponding_rows_unequal(table_device_view d_lhs, table_device_view d_rhs) : comp(d_lhs, d_rhs) { } cudf::row_equality_comparator<true> comp; __device__ bool operator()(size_type index) { return !comp(index, index); } }; class corresponding_rows_not_equivalent { table_device_view d_lhs; table_device_view d_rhs; public: corresponding_rows_not_equivalent(table_device_view d_lhs, table_device_view d_rhs) : d_lhs(d_lhs), d_rhs(d_rhs), comp(d_lhs, d_rhs) { CUDF_EXPECTS(d_lhs.num_columns() == 1 and d_rhs.num_columns() == 1, "Unsupported number of columns"); } struct typed_element_not_equivalent { template <typename T> __device__ std::enable_if_t<std::is_floating_point<T>::value, bool> operator()( column_device_view const& lhs, column_device_view const& rhs, size_type index) { if (lhs.is_valid(index) and rhs.is_valid(index)) { T const x = lhs.element<T>(index); T const y = rhs.element<T>(index); // Must handle inf and nan separately if (std::isinf(x) || std::isinf(y)) { return x != y; // comparison of (inf==inf) returns true } else if (std::isnan(x) || std::isnan(y)) { return std::isnan(x) != std::isnan(y); // comparison of (nan==nan) returns false } else { constexpr int ulp = 4; // ulp = unit of least precision, value taken from google test T const abs_x_minus_y = std::abs(x - y); return abs_x_minus_y >= std::numeric_limits<T>::min() && abs_x_minus_y > std::numeric_limits<T>::epsilon() * std::abs(x + y) * ulp; } } else { // if either is null, then the inequality was checked already return true; } } template <typename T, typename... Args> __device__ std::enable_if_t<not std::is_floating_point<T>::value, bool> operator()(Args...) { // Non-floating point inequality is checked already return true; } }; cudf::row_equality_comparator<true> comp; __device__ bool operator()(size_type index) { if (not comp(index, index)) { auto lhs_col = this->d_lhs.column(0); auto rhs_col = this->d_rhs.column(0); return type_dispatcher( lhs_col.type(), typed_element_not_equivalent{}, lhs_col, rhs_col, index); } return false; } }; // Stringify the inconsistent values resulted from the comparison of two columns element-wise std::string stringify_column_differences(cudf::device_span<int const> differences, column_view const& lhs, column_view const& rhs, bool print_all_differences, int depth) { CUDF_EXPECTS(not differences.empty(), "Shouldn't enter this function if `differences` is empty"); std::string const depth_str = depth > 0 ? "depth " + std::to_string(depth) + '\n' : ""; // move the differences to the host. 
auto h_differences = cudf::detail::make_host_vector_sync(differences); if (print_all_differences) { std::ostringstream buffer; buffer << depth_str << "differences:" << std::endl; auto source_table = cudf::table_view({lhs, rhs}); auto diff_column = fixed_width_column_wrapper<int32_t>(h_differences.begin(), h_differences.end()); auto diff_table = cudf::gather(source_table, diff_column); // Need to pull back the differences auto const h_left_strings = to_strings(diff_table->get_column(0)); auto const h_right_strings = to_strings(diff_table->get_column(1)); for (size_t i = 0; i < h_differences.size(); ++i) buffer << depth_str << "lhs[" << h_differences[i] << "] = " << h_left_strings[i] << ", rhs[" << h_differences[i] << "] = " << h_right_strings[i] << std::endl; return buffer.str(); } else { int index = h_differences[0]; // only stringify first difference auto diff_lhs = cudf::detail::slice(lhs, index, index + 1); auto diff_rhs = cudf::detail::slice(rhs, index, index + 1); return depth_str + "first difference: " + "lhs[" + std::to_string(index) + "] = " + to_string(diff_lhs, "") + ", rhs[" + std::to_string(index) + "] = " + to_string(diff_rhs, ""); } } // non-nested column types template <typename T, bool check_exact_equality> struct column_comparator_impl { void operator()(column_view const& lhs, column_view const& rhs, bool print_all_differences, int depth) { auto d_lhs = cudf::table_device_view::create(table_view{{lhs}}); auto d_rhs = cudf::table_device_view::create(table_view{{rhs}}); using ComparatorType = std::conditional_t<check_exact_equality, corresponding_rows_unequal, corresponding_rows_not_equivalent>; auto differences = rmm::device_uvector<int>( lhs.size(), rmm::cuda_stream_default); // worst case: everything different auto diff_iter = thrust::copy_if(rmm::exec_policy(), thrust::make_counting_iterator(0), thrust::make_counting_iterator(lhs.size()), differences.begin(), ComparatorType(*d_lhs, *d_rhs)); differences.resize(thrust::distance(differences.begin(), diff_iter), rmm::cuda_stream_default); // shrink back down if (not differences.is_empty()) GTEST_FAIL() << stringify_column_differences( differences, lhs, rhs, print_all_differences, depth); } }; // forward declaration for nested-type recursion. template <bool check_exact_equality> struct column_comparator; // specialization for list columns template <bool check_exact_equality> struct column_comparator_impl<list_view, check_exact_equality> { void operator()(column_view const& lhs, column_view const& rhs, bool print_all_differences, int depth) { lists_column_view lhs_l(lhs); lists_column_view rhs_l(rhs); CUDF_EXPECTS(lhs_l.size() == rhs_l.size(), "List column size mismatch"); if (lhs_l.is_empty()) { return; } // worst case - everything is different rmm::device_uvector<int> differences(lhs.size(), rmm::cuda_stream_default); // TODO : determine how equals/equivalency should work for columns with divergent underlying // data, but equivalent null masks. Example: // // List<int32_t>: // Length : 3 // Offsets : 0, 3, 5, 5 // Nulls: 011 // Children : // 1, 2, 3, 4, 5 // // List<int32_t>: // Length : 3 // Offsets : 0, 3, 5, 7 // Nulls: 011 // Children : // 1, 2, 3, 4, 5, 7, 8 // // These two columns are seemingly equivalent, since their top level rows are the same, with // just the last element being null. However, pyArrow will say these are -not- equal and // does not appear to have an equivalent() check. So the question is : should we be handling // this case when someone calls expect_columns_equivalent()? 
// compare offsets, taking slicing into account // left side size_type lhs_shift = cudf::detail::get_value<size_type>(lhs_l.offsets(), lhs_l.offset(), rmm::cuda_stream_default); auto lhs_offsets = thrust::make_transform_iterator( lhs_l.offsets().begin<size_type>() + lhs_l.offset(), [lhs_shift] __device__(size_type offset) { return offset - lhs_shift; }); auto lhs_valids = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [mask = lhs_l.null_mask(), offset = lhs_l.offset()] __device__(size_type index) { return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset); }); // right side size_type rhs_shift = cudf::detail::get_value<size_type>(rhs_l.offsets(), rhs_l.offset(), rmm::cuda_stream_default); auto rhs_offsets = thrust::make_transform_iterator( rhs_l.offsets().begin<size_type>() + rhs_l.offset(), [rhs_shift] __device__(size_type offset) { return offset - rhs_shift; }); auto rhs_valids = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [mask = rhs_l.null_mask(), offset = rhs_l.offset()] __device__(size_type index) { return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset); }); auto diff_iter = thrust::copy_if( rmm::exec_policy(), thrust::make_counting_iterator(0), thrust::make_counting_iterator(lhs_l.size() + 1), differences.begin(), [lhs_offsets, rhs_offsets, lhs_valids, rhs_valids, num_rows = lhs_l.size()] __device__( size_type index) { // last offset has no validity associated with it if (index < num_rows - 1) { if (lhs_valids[index] != rhs_valids[index]) { return true; } // if validity matches -and- is false, we can ignore the actual values. this // is technically not checking "equal()", but it's how the non-list code path handles it if (!lhs_valids[index]) { return false; } } return lhs_offsets[index] == rhs_offsets[index] ? 
false : true; }); differences.resize(thrust::distance(differences.begin(), diff_iter), rmm::cuda_stream_default); // shrink back down if (not differences.is_empty()) GTEST_FAIL() << stringify_column_differences( differences, lhs, rhs, print_all_differences, depth); // recurse auto lhs_child = lhs_l.get_sliced_child(rmm::cuda_stream_default); auto rhs_child = rhs_l.get_sliced_child(rmm::cuda_stream_default); cudf::type_dispatcher(lhs_child.type(), column_comparator<check_exact_equality>{}, lhs_child, rhs_child, print_all_differences, depth + 1); } }; template <bool check_exact_equality> struct column_comparator_impl<struct_view, check_exact_equality> { void operator()(column_view const& lhs, column_view const& rhs, bool print_all_differences, int depth) { structs_column_view l_scv(lhs); structs_column_view r_scv(rhs); std::for_each(thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + lhs.num_children(), [&](auto i) { column_view lhs_child = l_scv.get_sliced_child(i); column_view rhs_child = r_scv.get_sliced_child(i); cudf::type_dispatcher(lhs_child.type(), column_comparator<check_exact_equality>{}, lhs_child, rhs_child, print_all_differences, depth + 1); }); } }; template <bool check_exact_equality> struct column_comparator { template <typename T> void operator()(column_view const& lhs, column_view const& rhs, bool print_all_differences, int depth = 0) { // compare properties cudf::type_dispatcher(lhs.type(), column_property_comparator<check_exact_equality>{}, lhs, rhs); // compare values column_comparator_impl<T, check_exact_equality> comparator{}; comparator(lhs, rhs, print_all_differences, depth); } }; } // namespace /** * @copydoc cudf::test::expect_column_properties_equal */ void expect_column_properties_equal(column_view const& lhs, column_view const& rhs) { cudf::type_dispatcher(lhs.type(), column_property_comparator<true>{}, lhs, rhs); } /** * @copydoc cudf::test::expect_column_properties_equivalent */ void expect_column_properties_equivalent(column_view const& lhs, column_view const& rhs) { cudf::type_dispatcher(lhs.type(), column_property_comparator<false>{}, lhs, rhs); } /** * @copydoc cudf::test::expect_columns_equal */ void expect_columns_equal(cudf::column_view const& lhs, cudf::column_view const& rhs, bool print_all_differences) { cudf::type_dispatcher(lhs.type(), column_comparator<true>{}, lhs, rhs, print_all_differences); } /** * @copydoc cudf::test::expect_columns_equivalent */ void expect_columns_equivalent(cudf::column_view const& lhs, cudf::column_view const& rhs, bool print_all_differences) { cudf::type_dispatcher(lhs.type(), column_comparator<false>{}, lhs, rhs, print_all_differences); } /** * @copydoc cudf::test::expect_equal_buffers */ void expect_equal_buffers(void const* lhs, void const* rhs, std::size_t size_bytes) { if (size_bytes > 0) { EXPECT_NE(nullptr, lhs); EXPECT_NE(nullptr, rhs); } auto typed_lhs = static_cast<char const*>(lhs); auto typed_rhs = static_cast<char const*>(rhs); EXPECT_TRUE(thrust::equal(thrust::device, typed_lhs, typed_lhs + size_bytes, typed_rhs)); } /** * @copydoc cudf::test::bitmask_to_host */ std::vector<bitmask_type> bitmask_to_host(cudf::column_view const& c) { if (c.nullable()) { auto num_bitmasks = bitmask_allocation_size_bytes(c.size()) / sizeof(bitmask_type); std::vector<bitmask_type> host_bitmask(num_bitmasks); if (c.offset() == 0) { CUDA_TRY(hipMemcpy(host_bitmask.data(), c.null_mask(), num_bitmasks * sizeof(bitmask_type), hipMemcpyDeviceToHost)); } else { auto mask = copy_bitmask(c.null_mask(), c.offset(), 
c.offset() + c.size()); CUDA_TRY(hipMemcpy(host_bitmask.data(), mask.data(), num_bitmasks * sizeof(bitmask_type), hipMemcpyDeviceToHost)); } return host_bitmask; } else { return std::vector<bitmask_type>{}; } } namespace { template <typename T, typename std::enable_if_t<std::is_integral<T>::value>* = nullptr> static auto numeric_to_string_precise(T value) { return std::to_string(value); } template <typename T, typename std::enable_if_t<std::is_floating_point<T>::value>* = nullptr> static auto numeric_to_string_precise(T value) { std::ostringstream o; o << std::setprecision(std::numeric_limits<T>::max_digits10) << value; return o.str(); } static auto duration_suffix(cudf::duration_D) { return " days"; } static auto duration_suffix(cudf::duration_s) { return " seconds"; } static auto duration_suffix(cudf::duration_ms) { return " milliseconds"; } static auto duration_suffix(cudf::duration_us) { return " microseconds"; } static auto duration_suffix(cudf::duration_ns) { return " nanoseconds"; } std::string get_nested_type_str(cudf::column_view const& view) { if (view.type().id() == cudf::type_id::LIST) { lists_column_view lcv(view); return cudf::jit::get_type_name(view.type()) + "<" + (get_nested_type_str(lcv.child())) + ">"; } if (view.type().id() == cudf::type_id::STRUCT) { std::ostringstream out; out << cudf::jit::get_type_name(view.type()) + "<"; std::transform(view.child_begin(), view.child_end(), std::ostream_iterator<std::string>(out, ","), [&out](auto const col) { return get_nested_type_str(col); }); out << ">"; return out.str(); } return cudf::jit::get_type_name(view.type()); } template <typename NestedColumnView> std::string nested_offsets_to_string(NestedColumnView const& c, std::string const& delimiter = ", ") { column_view offsets = (c.parent()).child(NestedColumnView::offsets_column_index); CUDF_EXPECTS(offsets.type().id() == type_id::INT32, "Column does not appear to be an offsets column"); CUDF_EXPECTS(offsets.offset() == 0, "Offsets column has an internal offset!"); size_type output_size = c.size() + 1; // the first offset value to normalize everything against size_type first = cudf::detail::get_value<size_type>(offsets, c.offset(), rmm::cuda_stream_default); rmm::device_uvector<size_type> shifted_offsets(output_size, rmm::cuda_stream_default); // normalize the offset values for the column offset size_type const* d_offsets = offsets.head<size_type>() + c.offset(); thrust::transform( rmm::exec_policy(), d_offsets, d_offsets + output_size, shifted_offsets.begin(), [first] __device__(int32_t offset) { return static_cast<size_type>(offset - first); }); auto const h_shifted_offsets = cudf::detail::make_host_vector_sync(shifted_offsets); std::ostringstream buffer; for (size_t idx = 0; idx < h_shifted_offsets.size(); idx++) { buffer << h_shifted_offsets[idx]; if (idx < h_shifted_offsets.size() - 1) { buffer << delimiter; } } return buffer.str(); } struct column_view_printer { template <typename Element, typename std::enable_if_t<is_numeric<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { auto h_data = cudf::test::to_host<Element>(col); out.resize(col.size()); if (col.nullable()) { std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), out.begin(), [&h_data](auto idx) { return bit_is_set(h_data.second.data(), idx) ? 
numeric_to_string_precise(h_data.first[idx]) : std::string("NULL"); }); } else { std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) { return numeric_to_string_precise(el); }); } } template <typename Element, typename std::enable_if_t<is_timestamp<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const& indent) { // // For timestamps, convert timestamp column to column of strings, then // call string version // auto col_as_strings = cudf::strings::from_timestamps(col); if (col_as_strings->size() == 0) { return; } this->template operator()<cudf::string_view>(*col_as_strings, out, indent); } template <typename Element, typename std::enable_if_t<cudf::is_fixed_point<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { auto const h_data = cudf::test::to_host<Element>(col); if (col.nullable()) { std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), std::back_inserter(out), [&h_data](auto idx) { return h_data.second.empty() || bit_is_set(h_data.second.data(), idx) ? static_cast<std::string>(h_data.first[idx]) : std::string("NULL"); }); } else { std::transform(std::cbegin(h_data.first), std::cend(h_data.first), std::back_inserter(out), [col](auto const& fp) { return static_cast<std::string>(fp); }); } } template <typename Element, typename std::enable_if_t<std::is_same<Element, cudf::string_view>::value>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { // // Implementation for strings, call special to_host variant // if (col.is_empty()) return; auto h_data = cudf::test::to_host<std::string>(col); out.resize(col.size()); std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), out.begin(), [&h_data](auto idx) { return h_data.second.empty() || bit_is_set(h_data.second.data(), idx) ? h_data.first[idx] : std::string("NULL"); }); } template <typename Element, typename std::enable_if_t<std::is_same<Element, cudf::dictionary32>::value>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { cudf::dictionary_column_view dictionary(col); if (col.is_empty()) return; std::vector<std::string> keys = to_strings(dictionary.keys()); std::vector<std::string> indices = to_strings({dictionary.indices().type(), dictionary.size(), dictionary.indices().head(), dictionary.null_mask(), dictionary.null_count(), dictionary.offset()}); out.insert(out.end(), keys.begin(), keys.end()); if (!indices.empty()) { std::string first = "\x08 : " + indices.front(); // use : as delimiter out.push_back(first); // between keys and indices out.insert(out.end(), indices.begin() + 1, indices.end()); } } // Print the tick counts with the units template <typename Element, typename std::enable_if_t<is_duration<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { auto h_data = cudf::test::to_host<Element>(col); out.resize(col.size()); if (col.nullable()) { std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), out.begin(), [&h_data](auto idx) { return bit_is_set(h_data.second.data(), idx) ? 
numeric_to_string_precise(h_data.first[idx].count()) + duration_suffix(h_data.first[idx]) : std::string("NULL"); }); } else { std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) { return numeric_to_string_precise(el.count()) + duration_suffix(el); }); } } template <typename Element, typename std::enable_if_t<std::is_same<Element, cudf::list_view>::value>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const& indent) { lists_column_view lcv(col); // propagate slicing to the child if necessary column_view child = lcv.get_sliced_child(rmm::cuda_stream_default); bool const is_sliced = lcv.offset() > 0 || child.offset() > 0; std::string tmp = get_nested_type_str(col) + (is_sliced ? "(sliced)" : "") + ":\n" + indent + "Length : " + std::to_string(lcv.size()) + "\n" + indent + "Offsets : " + (lcv.size() > 0 ? nested_offsets_to_string(lcv) : "") + "\n" + (lcv.parent().nullable() ? indent + "Null count: " + std::to_string(lcv.null_count()) + "\n" + detail::to_string(bitmask_to_host(col), col.size(), indent) + "\n" : "") + // non-nested types don't typically display their null masks, so do it here for convenience. (!is_nested(child.type()) && child.nullable() ? " " + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n" : "") + (detail::to_string(child, ", ", indent + " ")) + "\n"; out.push_back(tmp); } template <typename Element, typename std::enable_if_t<std::is_same<Element, cudf::struct_view>::value>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const& indent) { structs_column_view view{col}; std::ostringstream out_stream; out_stream << get_nested_type_str(col) << ":\n" << indent << "Length : " << view.size() << ":\n"; if (view.nullable()) { out_stream << indent << "Null count: " << view.null_count() << "\n" << detail::to_string(bitmask_to_host(col), col.size(), indent) << "\n"; } auto iter = thrust::make_counting_iterator(0); std::transform( iter, iter + view.num_children(), std::ostream_iterator<std::string>(out_stream, "\n"), [&](size_type index) { auto child = view.get_sliced_child(index); // non-nested types don't typically display their null masks, so do it here for convenience. return (!is_nested(child.type()) && child.nullable() ? " " + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n" : "") + detail::to_string(child, ", ", indent + " "); }); out.push_back(out_stream.str()); } }; } // namespace namespace detail { /** * @copydoc cudf::test::detail::to_strings */ std::vector<std::string> to_strings(cudf::column_view const& col, std::string const& indent) { std::vector<std::string> reply; cudf::type_dispatcher(col.type(), column_view_printer{}, col, reply, indent); return reply; } /** * @copydoc cudf::test::detail::to_string(cudf::column_view, std::string, std::string) * * @param indent Indentation for all output */ std::string to_string(cudf::column_view const& col, std::string const& delimiter, std::string const& indent) { std::ostringstream buffer; std::vector<std::string> h_data = to_strings(col, indent); buffer << indent; std::copy(h_data.begin(), h_data.end() - (!h_data.empty()), std::ostream_iterator<std::string>(buffer, delimiter.c_str())); if (!h_data.empty()) buffer << h_data.back(); return buffer.str(); } /** * @copydoc cudf::test::detail::to_string(std::vector<bitmask_type>, size_type, std::string) * * @param indent Indentation for all output. See comment in `to_strings` for * a detailed description. 
*/ std::string to_string(std::vector<bitmask_type> const& null_mask, size_type null_mask_size, std::string const& indent) { std::ostringstream buffer; buffer << indent; for (int idx = null_mask_size - 1; idx >= 0; idx--) { buffer << (cudf::bit_is_set(null_mask.data(), idx) ? "1" : "0"); } return buffer.str(); } } // namespace detail /** * @copydoc cudf::test::to_strings */ std::vector<std::string> to_strings(cudf::column_view const& col) { return detail::to_strings(col); } /** * @copydoc cudf::test::to_string(cudf::column_view, std::string) */ std::string to_string(cudf::column_view const& col, std::string const& delimiter) { return detail::to_string(col, delimiter); } /** * @copydoc cudf::test::to_string(std::vector<bitmask_type>, size_type) */ std::string to_string(std::vector<bitmask_type> const& null_mask, size_type null_mask_size) { return detail::to_string(null_mask, null_mask_size); } /** * @copydoc cudf::test::print */ void print(cudf::column_view const& col, std::ostream& os, std::string const& delimiter) { os << to_string(col, delimiter) << std::endl; } /** * @copydoc cudf::test::validate_host_masks */ bool validate_host_masks(std::vector<bitmask_type> const& expected_mask, std::vector<bitmask_type> const& got_mask, size_type number_of_elements) { return std::all_of(thrust::make_counting_iterator(0), thrust::make_counting_iterator(number_of_elements), [&expected_mask, &got_mask](auto index) { return cudf::bit_is_set(expected_mask.data(), index) == cudf::bit_is_set(got_mask.data(), index); }); } } // namespace test } // namespace cudf
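The typed_element_not_equivalent functor in the column-utilities code above encodes the test framework's notion of floating-point equivalence: infinities must match exactly, two NaNs count as equivalent, and everything else is compared with roughly a 4-ULP relative tolerance (the same slack Google Test uses). A host-side restatement of that rule, detached from libcudf and with an illustrative not_equivalent name, is sketched below.

// Host-side sketch of the floating-point equivalence rule; not part of libcudf.
#include <cmath>
#include <limits>

template <typename T>
bool not_equivalent(T x, T y)
{
  if (std::isinf(x) || std::isinf(y)) {
    return x != y;                                  // inf compares equal only to the same inf
  }
  if (std::isnan(x) || std::isnan(y)) {
    return std::isnan(x) != std::isnan(y);          // both-nan counts as equivalent
  }
  constexpr int ulp = 4;                            // unit-of-least-precision slack, as above
  T const diff = std::abs(x - y);
  return diff >= std::numeric_limits<T>::min() &&   // ignore subnormal-sized differences
         diff > std::numeric_limits<T>::epsilon() * std::abs(x + y) * ulp;
}

// e.g. not_equivalent(1.0f, 1.0f + 1e-7f) is false (within 4 ULPs),
//      not_equivalent(1.0f, 1.0001f)      is true.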
7ea1c02c33ccbccd912c18ee69d379ca3544ea3a.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_view.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/lists/lists_column_view.hpp> #include <cudf/strings/convert/convert_datetime.hpp> #include <cudf/structs/struct_view.hpp> #include <cudf/structs/structs_column_view.hpp> #include <cudf/table/row_operators.cuh> #include <cudf/table/table_device_view.cuh> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf_test/column_utilities.hpp> #include <cudf_test/column_wrapper.hpp> #include <cudf_test/cudf_gtest.hpp> #include <cudf_test/detail/column_utilities.hpp> #include <jit/type.hpp> #include <rmm/exec_policy.hpp> #include <thrust/equal.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/logical.h> #include <numeric> #include <sstream> #include "cudf/detail/utilities/vector_factories.hpp" #include "rmm/cuda_stream_view.hpp" namespace cudf { namespace test { namespace { template <bool check_exact_equality> struct column_property_comparator { bool types_equivalent(cudf::data_type const& lhs, cudf::data_type const& rhs) { return is_fixed_point(lhs) ? lhs.id() == rhs.id() : lhs == rhs; } void compare_common(cudf::column_view const& lhs, cudf::column_view const& rhs) { if (check_exact_equality) { EXPECT_EQ(lhs.type(), rhs.type()); } else { EXPECT_TRUE(types_equivalent(lhs.type(), rhs.type())); } EXPECT_EQ(lhs.size(), rhs.size()); if (lhs.size() > 0 && check_exact_equality) { EXPECT_EQ(lhs.nullable(), rhs.nullable()); } EXPECT_EQ(lhs.null_count(), rhs.null_count()); // equivalent, but not exactly equal columns can have a different number of children if their // sizes are both 0. Specifically, empty string columns may or may not have children. 
if (check_exact_equality || (lhs.size() > 0 && lhs.null_count() < lhs.size())) { EXPECT_EQ(lhs.num_children(), rhs.num_children()); } } template <typename T, std::enable_if_t<!std::is_same<T, cudf::list_view>::value>* = nullptr> void operator()(cudf::column_view const& lhs, cudf::column_view const& rhs) { compare_common(lhs, rhs); } template <typename T, std::enable_if_t<std::is_same<T, cudf::list_view>::value>* = nullptr> void operator()(cudf::column_view const& lhs, cudf::column_view const& rhs) { compare_common(lhs, rhs); cudf::lists_column_view lhs_l(lhs); cudf::lists_column_view rhs_l(rhs); // recurse cudf::type_dispatcher(lhs_l.child().type(), column_property_comparator<check_exact_equality>{}, lhs_l.get_sliced_child(rmm::cuda_stream_default), rhs_l.get_sliced_child(rmm::cuda_stream_default)); } }; class corresponding_rows_unequal { public: corresponding_rows_unequal(table_device_view d_lhs, table_device_view d_rhs) : comp(d_lhs, d_rhs) { } cudf::row_equality_comparator<true> comp; __device__ bool operator()(size_type index) { return !comp(index, index); } }; class corresponding_rows_not_equivalent { table_device_view d_lhs; table_device_view d_rhs; public: corresponding_rows_not_equivalent(table_device_view d_lhs, table_device_view d_rhs) : d_lhs(d_lhs), d_rhs(d_rhs), comp(d_lhs, d_rhs) { CUDF_EXPECTS(d_lhs.num_columns() == 1 and d_rhs.num_columns() == 1, "Unsupported number of columns"); } struct typed_element_not_equivalent { template <typename T> __device__ std::enable_if_t<std::is_floating_point<T>::value, bool> operator()( column_device_view const& lhs, column_device_view const& rhs, size_type index) { if (lhs.is_valid(index) and rhs.is_valid(index)) { T const x = lhs.element<T>(index); T const y = rhs.element<T>(index); // Must handle inf and nan separately if (std::isinf(x) || std::isinf(y)) { return x != y; // comparison of (inf==inf) returns true } else if (std::isnan(x) || std::isnan(y)) { return std::isnan(x) != std::isnan(y); // comparison of (nan==nan) returns false } else { constexpr int ulp = 4; // ulp = unit of least precision, value taken from google test T const abs_x_minus_y = std::abs(x - y); return abs_x_minus_y >= std::numeric_limits<T>::min() && abs_x_minus_y > std::numeric_limits<T>::epsilon() * std::abs(x + y) * ulp; } } else { // if either is null, then the inequality was checked already return true; } } template <typename T, typename... Args> __device__ std::enable_if_t<not std::is_floating_point<T>::value, bool> operator()(Args...) { // Non-floating point inequality is checked already return true; } }; cudf::row_equality_comparator<true> comp; __device__ bool operator()(size_type index) { if (not comp(index, index)) { auto lhs_col = this->d_lhs.column(0); auto rhs_col = this->d_rhs.column(0); return type_dispatcher( lhs_col.type(), typed_element_not_equivalent{}, lhs_col, rhs_col, index); } return false; } }; // Stringify the inconsistent values resulted from the comparison of two columns element-wise std::string stringify_column_differences(cudf::device_span<int const> differences, column_view const& lhs, column_view const& rhs, bool print_all_differences, int depth) { CUDF_EXPECTS(not differences.empty(), "Shouldn't enter this function if `differences` is empty"); std::string const depth_str = depth > 0 ? "depth " + std::to_string(depth) + '\n' : ""; // move the differences to the host. 
auto h_differences = cudf::detail::make_host_vector_sync(differences); if (print_all_differences) { std::ostringstream buffer; buffer << depth_str << "differences:" << std::endl; auto source_table = cudf::table_view({lhs, rhs}); auto diff_column = fixed_width_column_wrapper<int32_t>(h_differences.begin(), h_differences.end()); auto diff_table = cudf::gather(source_table, diff_column); // Need to pull back the differences auto const h_left_strings = to_strings(diff_table->get_column(0)); auto const h_right_strings = to_strings(diff_table->get_column(1)); for (size_t i = 0; i < h_differences.size(); ++i) buffer << depth_str << "lhs[" << h_differences[i] << "] = " << h_left_strings[i] << ", rhs[" << h_differences[i] << "] = " << h_right_strings[i] << std::endl; return buffer.str(); } else { int index = h_differences[0]; // only stringify first difference auto diff_lhs = cudf::detail::slice(lhs, index, index + 1); auto diff_rhs = cudf::detail::slice(rhs, index, index + 1); return depth_str + "first difference: " + "lhs[" + std::to_string(index) + "] = " + to_string(diff_lhs, "") + ", rhs[" + std::to_string(index) + "] = " + to_string(diff_rhs, ""); } } // non-nested column types template <typename T, bool check_exact_equality> struct column_comparator_impl { void operator()(column_view const& lhs, column_view const& rhs, bool print_all_differences, int depth) { auto d_lhs = cudf::table_device_view::create(table_view{{lhs}}); auto d_rhs = cudf::table_device_view::create(table_view{{rhs}}); using ComparatorType = std::conditional_t<check_exact_equality, corresponding_rows_unequal, corresponding_rows_not_equivalent>; auto differences = rmm::device_uvector<int>( lhs.size(), rmm::cuda_stream_default); // worst case: everything different auto diff_iter = thrust::copy_if(rmm::exec_policy(), thrust::make_counting_iterator(0), thrust::make_counting_iterator(lhs.size()), differences.begin(), ComparatorType(*d_lhs, *d_rhs)); differences.resize(thrust::distance(differences.begin(), diff_iter), rmm::cuda_stream_default); // shrink back down if (not differences.is_empty()) GTEST_FAIL() << stringify_column_differences( differences, lhs, rhs, print_all_differences, depth); } }; // forward declaration for nested-type recursion. template <bool check_exact_equality> struct column_comparator; // specialization for list columns template <bool check_exact_equality> struct column_comparator_impl<list_view, check_exact_equality> { void operator()(column_view const& lhs, column_view const& rhs, bool print_all_differences, int depth) { lists_column_view lhs_l(lhs); lists_column_view rhs_l(rhs); CUDF_EXPECTS(lhs_l.size() == rhs_l.size(), "List column size mismatch"); if (lhs_l.is_empty()) { return; } // worst case - everything is different rmm::device_uvector<int> differences(lhs.size(), rmm::cuda_stream_default); // TODO : determine how equals/equivalency should work for columns with divergent underlying // data, but equivalent null masks. Example: // // List<int32_t>: // Length : 3 // Offsets : 0, 3, 5, 5 // Nulls: 011 // Children : // 1, 2, 3, 4, 5 // // List<int32_t>: // Length : 3 // Offsets : 0, 3, 5, 7 // Nulls: 011 // Children : // 1, 2, 3, 4, 5, 7, 8 // // These two columns are seemingly equivalent, since their top level rows are the same, with // just the last element being null. However, pyArrow will say these are -not- equal and // does not appear to have an equivalent() check. So the question is : should we be handling // this case when someone calls expect_columns_equivalent()? 
// compare offsets, taking slicing into account // left side size_type lhs_shift = cudf::detail::get_value<size_type>(lhs_l.offsets(), lhs_l.offset(), rmm::cuda_stream_default); auto lhs_offsets = thrust::make_transform_iterator( lhs_l.offsets().begin<size_type>() + lhs_l.offset(), [lhs_shift] __device__(size_type offset) { return offset - lhs_shift; }); auto lhs_valids = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [mask = lhs_l.null_mask(), offset = lhs_l.offset()] __device__(size_type index) { return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset); }); // right side size_type rhs_shift = cudf::detail::get_value<size_type>(rhs_l.offsets(), rhs_l.offset(), rmm::cuda_stream_default); auto rhs_offsets = thrust::make_transform_iterator( rhs_l.offsets().begin<size_type>() + rhs_l.offset(), [rhs_shift] __device__(size_type offset) { return offset - rhs_shift; }); auto rhs_valids = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [mask = rhs_l.null_mask(), offset = rhs_l.offset()] __device__(size_type index) { return mask == nullptr ? true : cudf::bit_is_set(mask, index + offset); }); auto diff_iter = thrust::copy_if( rmm::exec_policy(), thrust::make_counting_iterator(0), thrust::make_counting_iterator(lhs_l.size() + 1), differences.begin(), [lhs_offsets, rhs_offsets, lhs_valids, rhs_valids, num_rows = lhs_l.size()] __device__( size_type index) { // last offset has no validity associated with it if (index < num_rows - 1) { if (lhs_valids[index] != rhs_valids[index]) { return true; } // if validity matches -and- is false, we can ignore the actual values. this // is technically not checking "equal()", but it's how the non-list code path handles it if (!lhs_valids[index]) { return false; } } return lhs_offsets[index] == rhs_offsets[index] ? 
false : true; }); differences.resize(thrust::distance(differences.begin(), diff_iter), rmm::cuda_stream_default); // shrink back down if (not differences.is_empty()) GTEST_FAIL() << stringify_column_differences( differences, lhs, rhs, print_all_differences, depth); // recurse auto lhs_child = lhs_l.get_sliced_child(rmm::cuda_stream_default); auto rhs_child = rhs_l.get_sliced_child(rmm::cuda_stream_default); cudf::type_dispatcher(lhs_child.type(), column_comparator<check_exact_equality>{}, lhs_child, rhs_child, print_all_differences, depth + 1); } }; template <bool check_exact_equality> struct column_comparator_impl<struct_view, check_exact_equality> { void operator()(column_view const& lhs, column_view const& rhs, bool print_all_differences, int depth) { structs_column_view l_scv(lhs); structs_column_view r_scv(rhs); std::for_each(thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + lhs.num_children(), [&](auto i) { column_view lhs_child = l_scv.get_sliced_child(i); column_view rhs_child = r_scv.get_sliced_child(i); cudf::type_dispatcher(lhs_child.type(), column_comparator<check_exact_equality>{}, lhs_child, rhs_child, print_all_differences, depth + 1); }); } }; template <bool check_exact_equality> struct column_comparator { template <typename T> void operator()(column_view const& lhs, column_view const& rhs, bool print_all_differences, int depth = 0) { // compare properties cudf::type_dispatcher(lhs.type(), column_property_comparator<check_exact_equality>{}, lhs, rhs); // compare values column_comparator_impl<T, check_exact_equality> comparator{}; comparator(lhs, rhs, print_all_differences, depth); } }; } // namespace /** * @copydoc cudf::test::expect_column_properties_equal */ void expect_column_properties_equal(column_view const& lhs, column_view const& rhs) { cudf::type_dispatcher(lhs.type(), column_property_comparator<true>{}, lhs, rhs); } /** * @copydoc cudf::test::expect_column_properties_equivalent */ void expect_column_properties_equivalent(column_view const& lhs, column_view const& rhs) { cudf::type_dispatcher(lhs.type(), column_property_comparator<false>{}, lhs, rhs); } /** * @copydoc cudf::test::expect_columns_equal */ void expect_columns_equal(cudf::column_view const& lhs, cudf::column_view const& rhs, bool print_all_differences) { cudf::type_dispatcher(lhs.type(), column_comparator<true>{}, lhs, rhs, print_all_differences); } /** * @copydoc cudf::test::expect_columns_equivalent */ void expect_columns_equivalent(cudf::column_view const& lhs, cudf::column_view const& rhs, bool print_all_differences) { cudf::type_dispatcher(lhs.type(), column_comparator<false>{}, lhs, rhs, print_all_differences); } /** * @copydoc cudf::test::expect_equal_buffers */ void expect_equal_buffers(void const* lhs, void const* rhs, std::size_t size_bytes) { if (size_bytes > 0) { EXPECT_NE(nullptr, lhs); EXPECT_NE(nullptr, rhs); } auto typed_lhs = static_cast<char const*>(lhs); auto typed_rhs = static_cast<char const*>(rhs); EXPECT_TRUE(thrust::equal(thrust::device, typed_lhs, typed_lhs + size_bytes, typed_rhs)); } /** * @copydoc cudf::test::bitmask_to_host */ std::vector<bitmask_type> bitmask_to_host(cudf::column_view const& c) { if (c.nullable()) { auto num_bitmasks = bitmask_allocation_size_bytes(c.size()) / sizeof(bitmask_type); std::vector<bitmask_type> host_bitmask(num_bitmasks); if (c.offset() == 0) { CUDA_TRY(cudaMemcpy(host_bitmask.data(), c.null_mask(), num_bitmasks * sizeof(bitmask_type), cudaMemcpyDeviceToHost)); } else { auto mask = copy_bitmask(c.null_mask(), c.offset(), 
c.offset() + c.size()); CUDA_TRY(cudaMemcpy(host_bitmask.data(), mask.data(), num_bitmasks * sizeof(bitmask_type), cudaMemcpyDeviceToHost)); } return host_bitmask; } else { return std::vector<bitmask_type>{}; } } namespace { template <typename T, typename std::enable_if_t<std::is_integral<T>::value>* = nullptr> static auto numeric_to_string_precise(T value) { return std::to_string(value); } template <typename T, typename std::enable_if_t<std::is_floating_point<T>::value>* = nullptr> static auto numeric_to_string_precise(T value) { std::ostringstream o; o << std::setprecision(std::numeric_limits<T>::max_digits10) << value; return o.str(); } static auto duration_suffix(cudf::duration_D) { return " days"; } static auto duration_suffix(cudf::duration_s) { return " seconds"; } static auto duration_suffix(cudf::duration_ms) { return " milliseconds"; } static auto duration_suffix(cudf::duration_us) { return " microseconds"; } static auto duration_suffix(cudf::duration_ns) { return " nanoseconds"; } std::string get_nested_type_str(cudf::column_view const& view) { if (view.type().id() == cudf::type_id::LIST) { lists_column_view lcv(view); return cudf::jit::get_type_name(view.type()) + "<" + (get_nested_type_str(lcv.child())) + ">"; } if (view.type().id() == cudf::type_id::STRUCT) { std::ostringstream out; out << cudf::jit::get_type_name(view.type()) + "<"; std::transform(view.child_begin(), view.child_end(), std::ostream_iterator<std::string>(out, ","), [&out](auto const col) { return get_nested_type_str(col); }); out << ">"; return out.str(); } return cudf::jit::get_type_name(view.type()); } template <typename NestedColumnView> std::string nested_offsets_to_string(NestedColumnView const& c, std::string const& delimiter = ", ") { column_view offsets = (c.parent()).child(NestedColumnView::offsets_column_index); CUDF_EXPECTS(offsets.type().id() == type_id::INT32, "Column does not appear to be an offsets column"); CUDF_EXPECTS(offsets.offset() == 0, "Offsets column has an internal offset!"); size_type output_size = c.size() + 1; // the first offset value to normalize everything against size_type first = cudf::detail::get_value<size_type>(offsets, c.offset(), rmm::cuda_stream_default); rmm::device_uvector<size_type> shifted_offsets(output_size, rmm::cuda_stream_default); // normalize the offset values for the column offset size_type const* d_offsets = offsets.head<size_type>() + c.offset(); thrust::transform( rmm::exec_policy(), d_offsets, d_offsets + output_size, shifted_offsets.begin(), [first] __device__(int32_t offset) { return static_cast<size_type>(offset - first); }); auto const h_shifted_offsets = cudf::detail::make_host_vector_sync(shifted_offsets); std::ostringstream buffer; for (size_t idx = 0; idx < h_shifted_offsets.size(); idx++) { buffer << h_shifted_offsets[idx]; if (idx < h_shifted_offsets.size() - 1) { buffer << delimiter; } } return buffer.str(); } struct column_view_printer { template <typename Element, typename std::enable_if_t<is_numeric<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { auto h_data = cudf::test::to_host<Element>(col); out.resize(col.size()); if (col.nullable()) { std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), out.begin(), [&h_data](auto idx) { return bit_is_set(h_data.second.data(), idx) ? 
numeric_to_string_precise(h_data.first[idx]) : std::string("NULL"); }); } else { std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) { return numeric_to_string_precise(el); }); } } template <typename Element, typename std::enable_if_t<is_timestamp<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const& indent) { // // For timestamps, convert timestamp column to column of strings, then // call string version // auto col_as_strings = cudf::strings::from_timestamps(col); if (col_as_strings->size() == 0) { return; } this->template operator()<cudf::string_view>(*col_as_strings, out, indent); } template <typename Element, typename std::enable_if_t<cudf::is_fixed_point<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { auto const h_data = cudf::test::to_host<Element>(col); if (col.nullable()) { std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), std::back_inserter(out), [&h_data](auto idx) { return h_data.second.empty() || bit_is_set(h_data.second.data(), idx) ? static_cast<std::string>(h_data.first[idx]) : std::string("NULL"); }); } else { std::transform(std::cbegin(h_data.first), std::cend(h_data.first), std::back_inserter(out), [col](auto const& fp) { return static_cast<std::string>(fp); }); } } template <typename Element, typename std::enable_if_t<std::is_same<Element, cudf::string_view>::value>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { // // Implementation for strings, call special to_host variant // if (col.is_empty()) return; auto h_data = cudf::test::to_host<std::string>(col); out.resize(col.size()); std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), out.begin(), [&h_data](auto idx) { return h_data.second.empty() || bit_is_set(h_data.second.data(), idx) ? h_data.first[idx] : std::string("NULL"); }); } template <typename Element, typename std::enable_if_t<std::is_same<Element, cudf::dictionary32>::value>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { cudf::dictionary_column_view dictionary(col); if (col.is_empty()) return; std::vector<std::string> keys = to_strings(dictionary.keys()); std::vector<std::string> indices = to_strings({dictionary.indices().type(), dictionary.size(), dictionary.indices().head(), dictionary.null_mask(), dictionary.null_count(), dictionary.offset()}); out.insert(out.end(), keys.begin(), keys.end()); if (!indices.empty()) { std::string first = "\x08 : " + indices.front(); // use : as delimiter out.push_back(first); // between keys and indices out.insert(out.end(), indices.begin() + 1, indices.end()); } } // Print the tick counts with the units template <typename Element, typename std::enable_if_t<is_duration<Element>()>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const&) { auto h_data = cudf::test::to_host<Element>(col); out.resize(col.size()); if (col.nullable()) { std::transform(thrust::make_counting_iterator(size_type{0}), thrust::make_counting_iterator(col.size()), out.begin(), [&h_data](auto idx) { return bit_is_set(h_data.second.data(), idx) ? 
numeric_to_string_precise(h_data.first[idx].count()) + duration_suffix(h_data.first[idx]) : std::string("NULL"); }); } else { std::transform(h_data.first.begin(), h_data.first.end(), out.begin(), [](Element el) { return numeric_to_string_precise(el.count()) + duration_suffix(el); }); } } template <typename Element, typename std::enable_if_t<std::is_same<Element, cudf::list_view>::value>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const& indent) { lists_column_view lcv(col); // propagate slicing to the child if necessary column_view child = lcv.get_sliced_child(rmm::cuda_stream_default); bool const is_sliced = lcv.offset() > 0 || child.offset() > 0; std::string tmp = get_nested_type_str(col) + (is_sliced ? "(sliced)" : "") + ":\n" + indent + "Length : " + std::to_string(lcv.size()) + "\n" + indent + "Offsets : " + (lcv.size() > 0 ? nested_offsets_to_string(lcv) : "") + "\n" + (lcv.parent().nullable() ? indent + "Null count: " + std::to_string(lcv.null_count()) + "\n" + detail::to_string(bitmask_to_host(col), col.size(), indent) + "\n" : "") + // non-nested types don't typically display their null masks, so do it here for convenience. (!is_nested(child.type()) && child.nullable() ? " " + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n" : "") + (detail::to_string(child, ", ", indent + " ")) + "\n"; out.push_back(tmp); } template <typename Element, typename std::enable_if_t<std::is_same<Element, cudf::struct_view>::value>* = nullptr> void operator()(cudf::column_view const& col, std::vector<std::string>& out, std::string const& indent) { structs_column_view view{col}; std::ostringstream out_stream; out_stream << get_nested_type_str(col) << ":\n" << indent << "Length : " << view.size() << ":\n"; if (view.nullable()) { out_stream << indent << "Null count: " << view.null_count() << "\n" << detail::to_string(bitmask_to_host(col), col.size(), indent) << "\n"; } auto iter = thrust::make_counting_iterator(0); std::transform( iter, iter + view.num_children(), std::ostream_iterator<std::string>(out_stream, "\n"), [&](size_type index) { auto child = view.get_sliced_child(index); // non-nested types don't typically display their null masks, so do it here for convenience. return (!is_nested(child.type()) && child.nullable() ? " " + detail::to_string(bitmask_to_host(child), child.size(), indent) + "\n" : "") + detail::to_string(child, ", ", indent + " "); }); out.push_back(out_stream.str()); } }; } // namespace namespace detail { /** * @copydoc cudf::test::detail::to_strings */ std::vector<std::string> to_strings(cudf::column_view const& col, std::string const& indent) { std::vector<std::string> reply; cudf::type_dispatcher(col.type(), column_view_printer{}, col, reply, indent); return reply; } /** * @copydoc cudf::test::detail::to_string(cudf::column_view, std::string, std::string) * * @param indent Indentation for all output */ std::string to_string(cudf::column_view const& col, std::string const& delimiter, std::string const& indent) { std::ostringstream buffer; std::vector<std::string> h_data = to_strings(col, indent); buffer << indent; std::copy(h_data.begin(), h_data.end() - (!h_data.empty()), std::ostream_iterator<std::string>(buffer, delimiter.c_str())); if (!h_data.empty()) buffer << h_data.back(); return buffer.str(); } /** * @copydoc cudf::test::detail::to_string(std::vector<bitmask_type>, size_type, std::string) * * @param indent Indentation for all output. See comment in `to_strings` for * a detailed description. 
*/ std::string to_string(std::vector<bitmask_type> const& null_mask, size_type null_mask_size, std::string const& indent) { std::ostringstream buffer; buffer << indent; for (int idx = null_mask_size - 1; idx >= 0; idx--) { buffer << (cudf::bit_is_set(null_mask.data(), idx) ? "1" : "0"); } return buffer.str(); } } // namespace detail /** * @copydoc cudf::test::to_strings */ std::vector<std::string> to_strings(cudf::column_view const& col) { return detail::to_strings(col); } /** * @copydoc cudf::test::to_string(cudf::column_view, std::string) */ std::string to_string(cudf::column_view const& col, std::string const& delimiter) { return detail::to_string(col, delimiter); } /** * @copydoc cudf::test::to_string(std::vector<bitmask_type>, size_type) */ std::string to_string(std::vector<bitmask_type> const& null_mask, size_type null_mask_size) { return detail::to_string(null_mask, null_mask_size); } /** * @copydoc cudf::test::print */ void print(cudf::column_view const& col, std::ostream& os, std::string const& delimiter) { os << to_string(col, delimiter) << std::endl; } /** * @copydoc cudf::test::validate_host_masks */ bool validate_host_masks(std::vector<bitmask_type> const& expected_mask, std::vector<bitmask_type> const& got_mask, size_type number_of_elements) { return std::all_of(thrust::make_counting_iterator(0), thrust::make_counting_iterator(number_of_elements), [&expected_mask, &got_mask](auto index) { return cudf::bit_is_set(expected_mask.data(), index) == cudf::bit_is_set(got_mask.data(), index); }); } } // namespace test } // namespace cudf
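// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original cudf sources): a minimal
// gtest case exercising the comparison and printing helpers defined above.
// The test name, the column contents and the assumption that the usual
// cudf_test include layout is available are illustrative only.
// ---------------------------------------------------------------------------
#include <cudf_test/column_utilities.hpp>
#include <cudf_test/column_wrapper.hpp>

#include <gtest/gtest.h>

TEST(ColumnUtilitiesSketch, EqualAndEquivalent)
{
  // Two INT32 columns with identical values and the same null at index 2.
  cudf::test::fixed_width_column_wrapper<int32_t> lhs({1, 2, 3, 4}, {1, 1, 0, 1});
  cudf::test::fixed_width_column_wrapper<int32_t> rhs({1, 2, 3, 4}, {1, 1, 0, 1});

  // Exact comparison: types, sizes, null counts and null masks must all match.
  cudf::test::expect_columns_equal(lhs, rhs);

  // Equivalent comparison: same logical contents, while details of the
  // physical null-mask representation are allowed to differ.
  cudf::test::expect_columns_equivalent(lhs, rhs);

  // Debug aid: dump a human-readable rendering of a column to stdout.
  cudf::test::print(lhs);
}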
103fdf7ee8e57602d5f67a8a762e495b2df3c0be.hip
// !!! This is a file automatically generated by hipify!!! /** * Fabio D'Isidoro, ETH Zurich, 08.08.2017 * * Implementation of a CUDA-based Cpp library for fast DRR generation with GPU acceleration * * Based both on the description found in the Improved Algorithm section in Jacobs paper (1998) * https://www.researchgate.net/publication/2344985_A_Fast_Algorithm_to_Calculate_the_Exact_Radiological_Path_Through_a_Pixel_Or_Voxel_Space * and on the implementation suggested in Greef et al 2009 * https://www.ncbi.nlm.nih.gov/pubmed/19810482 * * Source file for the Class Siddon (see header for more information) */ #include "stdio.h" #include "siddon_class.cuh" #include <iostream> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" __device__ const float epsilon = 2.22045e-016; // to compare double float values // auxiliary functions __device__ void get_dest(int idx, float *dest_array, float *dest) { dest[0] = dest_array[0 + 3 * idx]; dest[1] = dest_array[1 + 3 * idx]; dest[2] = dest_array[2 + 3 * idx]; } __device__ void compute_alpha_x(const float &X0, const float &spacing_x, const int &i, const float &source_x, const float &dest_x, float &alpha_x) { alpha_x = ((X0 + static_cast<float>(i)*spacing_x) - source_x) / (dest_x - source_x); } __device__ void compute_alpha_y(const float &Y0, const float &spacing_y, const int &j, const float &source_y, const float &dest_y, float &alpha_y) { alpha_y = ((Y0 + static_cast<float>(j)*spacing_y) - source_y) / (dest_y - source_y); } __device__ void compute_alpha_z(const float &Z0, const float &spacing_z, const int &k, const float &source_z, const float &dest_z, float &alpha_z) { alpha_z = ((Z0 + static_cast<float>(k)*spacing_z) - source_z) / (dest_z - source_z); } __device__ void compute_phi_x(const float &X0, const float &spacing_x, float &alpha, const float &source_x, const float &dest_x, float &phi_x) { phi_x = (source_x + alpha*(dest_x - source_x) - X0) / spacing_x; } __device__ void compute_phi_y(const float &Y0, const float &spacing_y, float &alpha, const float &source_y, const float &dest_y, float &phi_y) { phi_y = (source_y + alpha*(dest_y - source_y) - Y0) / spacing_y; } __device__ void compute_phi_z(const float &Z0, const float &spacing_z, float &alpha, const float &source_z, const float &dest_z, float &phi_z) { phi_z = (source_z + alpha*(dest_z - source_z) - Z0) / spacing_z; } __device__ void update_idx(unsigned int &i_v, unsigned int &j_v, unsigned int &k_v, const int &size_x, const int &size_y, int &arrayIdx) { arrayIdx = i_v + size_x * (j_v + size_y * k_v); } __global__ void cuda_kernel(float *DRRarray, float *source, float *DestArray, int DRRsize0, float *movImgArray, int *MovSize, float *MovSpacing, float X0, float Y0, float Z0) { // DRR image indeces int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; // DRR array index int DRRidx = row + DRRsize0 * col; //printf("Thread index %i\n", DRRidx); if (DRRidx < DRRsize0 * DRRsize0) { // checks if thread index is within the length of the DRR array // --- declaration of variables for Siddon --- float alpha_min, alpha_max; float alpha_x_min, alpha_x_max, alpha_y_min, alpha_y_max, alpha_z_min, alpha_z_max; int i_min, i_max, j_min, j_max, k_min, k_max; // indeces corresponding to first and last intersected voxels float alpha_current; float alpha_x_next; float alpha_y_next; float alpha_z_next; float distance; int arrayIdx; int arrayIdx_old; unsigned int i_v, j_v, k_v; float alpha_first_pixel; float density_value = 0.; // --- define destination 
point based on DRR array index --- float dest[3]; get_dest(DRRidx, DestArray, dest); // --- source-to-destination distance --- distance = sqrtf((dest[0] - source[0])*(dest[0] - source[0]) + (dest[1] - source[1])*(dest[1] - source[1]) + (dest[2] - source[2])*(dest[2] - source[2])); float dx = MovSpacing[0] / fabsf(dest[0] - source[0]); float dy = MovSpacing[1] / fabsf(dest[1] - source[1]); float dz = MovSpacing[2] / fabsf(dest[2] - source[2]); // --- find alpha_min and alpha_max // initialize alpha_min and alpha_max alpha_min = 0.; alpha_max = 1.; // X if (fabsf(dest[0] - source[0]) > epsilon) { float alpha_x0 = (X0 - source[0]) / (dest[0] - source[0]); float alpha_xN; compute_alpha_x(X0, MovSpacing[0], MovSize[0], source[0], dest[0], alpha_xN); alpha_x_min = fminf(alpha_x0, alpha_xN); alpha_x_max = fmaxf(alpha_x0, alpha_xN); if (alpha_x_min > alpha_min) { alpha_min = alpha_x_min; }; if (alpha_x_max < alpha_max) { alpha_max = alpha_x_max; }; } // Y if (fabsf(dest[1] - source[1]) > epsilon) { float alpha_y0 = (Y0 - source[1]) / (dest[1] - source[1]); float alpha_yN; compute_alpha_y(Y0, MovSpacing[1], MovSize[1], source[1], dest[1], alpha_yN); alpha_y_min = fminf(alpha_y0, alpha_yN); alpha_y_max = fmaxf(alpha_y0, alpha_yN); if (alpha_y_min > alpha_min) { alpha_min = alpha_y_min; }; if (alpha_y_max < alpha_max) { alpha_max = alpha_y_max; }; } // Z if (fabsf(dest[2] - source[2]) > epsilon) { float alpha_z0 = (Z0 - source[2]) / (dest[2] - source[2]); float alpha_zN; compute_alpha_z(Z0, MovSpacing[2], MovSize[2], source[2], dest[2], alpha_zN); alpha_z_min = fminf(alpha_z0, alpha_zN); alpha_z_max = fmaxf(alpha_z0, alpha_zN); if (alpha_z_min > alpha_min) { alpha_min = alpha_z_min; }; if (alpha_z_max < alpha_max) { alpha_max = alpha_z_max; }; } //if (DRRidx == 0){ //printf("Alpha min = %f\n", alpha_min); //printf("Alpha max = %f\n", alpha_max); //} // --- initialize alpha --- alpha_current = alpha_min; if (alpha_min < alpha_max) { // compute i_min, i_max and initialize alpha_x_next if (dest[0] - source[0] > 0.) { // i_min if (fabsf(alpha_min - alpha_x_min) < epsilon) { i_min = 1; } else { float phi_x; compute_phi_x(X0, MovSpacing[0], alpha_min, source[0], dest[0], phi_x); i_min = ceil(phi_x); } // i_max if (fabsf(alpha_max - alpha_x_max) < epsilon) { i_max = MovSize[0] - 1; } else { float phi_x; compute_phi_x(X0, MovSpacing[0], alpha_max, source[0], dest[0], phi_x); i_max = floor(phi_x); } // initialize alpha_x_next compute_alpha_x(X0, MovSpacing[0], i_min, source[0], dest[0], alpha_x_next); } else { // i_max if (fabsf(alpha_min - alpha_x_min) < epsilon) { i_max = MovSize[0] - 1; } else { float phi_x; compute_phi_x(X0, MovSpacing[0], alpha_min, source[0], dest[0], phi_x); i_max = floor(phi_x); } // i_min if (fabsf(alpha_max - alpha_x_max) < epsilon) { i_min = 0; } else { float phi_x; compute_phi_x(X0, MovSpacing[0], alpha_max, source[0], dest[0], phi_x); i_min = ceil(phi_x); } // initialize alpha_x_next compute_alpha_x(X0, MovSpacing[0], i_max, source[0], dest[0], alpha_x_next); } // compute j_min, j_max and initialize alpha_y_next if (dest[1] - source[1] > 0.) 
{ // j_min if (fabsf(alpha_min - alpha_y_min) < epsilon) { j_min = 1; } else { float phi_y; compute_phi_y(Y0, MovSpacing[1], alpha_min, source[1], dest[1], phi_y); j_min = ceil(phi_y); } // j_max if (fabsf(alpha_max - alpha_y_max) < epsilon) { j_max = MovSize[1] - 1; } else { float phi_y; compute_phi_y(Y0, MovSpacing[1], alpha_max, source[1], dest[1], phi_y); j_max = floor(phi_y); } // initialize alpha_y_next compute_alpha_y(Y0, MovSpacing[1], j_min, source[1], dest[1], alpha_y_next); } else { // j_max if (fabsf(alpha_min - alpha_y_min) < epsilon) { j_max = MovSize[1] - 1; } else { float phi_y; compute_phi_y(Y0, MovSpacing[1], alpha_min, source[1], dest[1], phi_y); j_max = floor(phi_y); } // j_min if (fabsf(alpha_max - alpha_y_max) < epsilon) { j_min = 0; } else { float phi_y; compute_phi_y(Y0, MovSpacing[1], alpha_max, source[1], dest[1], phi_y); j_min = ceil(phi_y); } // initialize alpha_y_next compute_alpha_y(Y0, MovSpacing[1], j_max, source[1], dest[1], alpha_y_next); } // compute k_min, k_max and initialize alpha_z_next if (dest[2] - source[2] > 0.) { // k_min if (fabsf(alpha_min - alpha_z_min) < epsilon) { k_min = 1; } else { float phi_z; compute_phi_z(Z0, MovSpacing[2], alpha_min, source[2], dest[2], phi_z); k_min = ceil(phi_z); } // k_max if (fabsf(alpha_max - alpha_z_max) < epsilon) { k_max = MovSize[2] - 1; } else { float phi_z; compute_phi_z(Z0, MovSpacing[2], alpha_max, source[2], dest[2], phi_z); k_max = floor(phi_z); } // initialize alpha_z_next compute_alpha_z(Z0, MovSpacing[2], k_min, source[2], dest[2], alpha_z_next); } else { // k_max if (fabsf(alpha_min - alpha_z_min) < epsilon) { k_max = MovSize[2] - 1; } else { float phi_z; compute_phi_z(Z0, MovSpacing[2], alpha_min, source[2], dest[2], phi_z); k_max = floor(phi_z); } // k_min if (fabsf(alpha_max - alpha_z_max) < epsilon) { k_min = 0; } else { float phi_z; compute_phi_z(Z0, MovSpacing[2], alpha_max, source[2], dest[2], phi_z); k_min = ceil(phi_z); } // initialize alpha_z_next compute_alpha_z(Z0, MovSpacing[2], k_max, source[2], dest[2], alpha_z_next); } //if (DRRidx == 0) { // printf("i_min, i_max, Alpha_x_next = %d %d %f\n", i_min, i_max, alpha_x_next); // printf("j_min, j_max, Alpha_y_next = %d %d %f\n", j_min, j_max, alpha_y_next); // printf("k_min, k_max, Alpha_z_next = %d %d %f\n", k_min, k_max, alpha_z_next); //} // --- initialize first intersected pixel i_v, j_v, k_v --- if ((alpha_y_next < alpha_x_next) && (alpha_y_next < alpha_z_next)) { alpha_first_pixel = (alpha_y_next + alpha_min) / 2.; } else if (alpha_x_next < alpha_z_next) { alpha_first_pixel = (alpha_x_next + alpha_min) / 2.; } else { alpha_first_pixel = (alpha_z_next + alpha_min) / 2.; } float phi_x = 0.; float phi_y = 0.; float phi_z = 0.; compute_phi_x(X0, MovSpacing[0], alpha_first_pixel, source[0], dest[0], phi_x); i_v = floor(phi_x); compute_phi_y(Y0, MovSpacing[1], alpha_first_pixel, source[1], dest[1], phi_y); j_v = floor(phi_y); compute_phi_z(Z0, MovSpacing[2], alpha_first_pixel, source[2], dest[2], phi_z); k_v = floor(phi_z); // initialize array index of first intersected pixel arrayIdx = i_v + MovSize[0] * (j_v + MovSize[1] * k_v); arrayIdx_old = i_v + MovSize[0] * (j_v + MovSize[1] * k_v); //if (DRRidx == 0) { // printf("i_v, j_v, k_v = %d %d %d\n", i_v, j_v, k_v); // printf("arrayIdx, arrayIdx_old = %d %d\n", arrayIdx, arrayIdx_old); //} // iterator indeces int stop = (i_max - i_min + 1) + (j_max - j_min + 1) + (k_max - k_min + 1); int iter = 0; //while (alpha_current < 1. 
&& alpha_current < alpha_max) { while (iter < stop) { float l; // next intersection plane is y if ((alpha_y_next < alpha_x_next) && (alpha_y_next < alpha_z_next)) { //T alpha_mid = (alpha_current + alpha_y_next) / 2.; l = (alpha_y_next - alpha_current); alpha_current = alpha_y_next; // update alpha_y_next += dy; j_v += (dest[1] - source[1] > 0.) ? 1 : -1; } else if (alpha_x_next < alpha_z_next) { // next intersection plane is x //T alpha_mid = (alpha_current + alpha_x_next) / 2.; l = (alpha_x_next - alpha_current); alpha_current = alpha_x_next; // update alpha_x_next += dx; i_v += (dest[0] - source[0] > 0.) ? 1 : -1; } else { // next intersection plane is z //T alpha_mid = (alpha_current + alpha_z_next) / 2.; l = (alpha_z_next - alpha_current); alpha_current = alpha_z_next; // update alpha_z_next += dz; k_v += (dest[2] - source[2] > 0.) ? 1 : -1; } // update array index update_idx(i_v, j_v, k_v, MovSize[0], MovSize[1], arrayIdx); //if (arrayIdx < 0.) { // printf("arrayIdx negative! %i", arrayIdx); //} if (arrayIdx_old > 0.){ // update density value if (movImgArray[arrayIdx_old] != 0.) { density_value += movImgArray[arrayIdx_old] * l; //std::cout << density_value << std::endl; } } // update arrayIdx arrayIdx_old = arrayIdx; // update iter iter += 1; } // multiply by the distance density_value *= distance; //std::cout << density_value << std::endl; } // update density value array DRRarray[DRRidx] = density_value; } } /** * * Deafult constructor * **/ SiddonGpu::SiddonGpu() { } /** * * Overloaded constructor loads the CT scan (together with size and spacing) onto GPU memory * **/ SiddonGpu::SiddonGpu(int *NumThreadsPerBlock, float *movImgArray, int *MovSize, float *MovSpacing, float X0, float Y0, float Z0, int *DRRSize) { // ---- Allocate variable members ---- m_NumThreadsPerBlock[0] = NumThreadsPerBlock[0]; m_NumThreadsPerBlock[1] = NumThreadsPerBlock[1]; m_NumThreadsPerBlock[2] = NumThreadsPerBlock[2]; //m_MovSize[0] = MovSize[0]; //m_MovSize[1] = MovSize[1]; //m_MovSize[2] = MovSize[2]; m_X0 = X0; m_Y0 = Y0; m_Z0 = Z0; m_DRRsize[0] = DRRSize[0]; m_DRRsize[1] = DRRSize[1]; m_DRRsize[2] = DRRSize[2]; m_DRRsize0 = DRRSize[0]; m_movImgMemSize = MovSize[0] * MovSize[1] * MovSize[2] * sizeof(float); m_DestMemSize = (DRRSize[0] * DRRSize[1] * DRRSize[2] * 3) * sizeof(float); m_DrrMemSize = (DRRSize[0] * DRRSize[1] * DRRSize[2]) * sizeof(float); // memory for each output drr // allocate space for device copies hipMalloc((void**)&m_d_movImgArray, m_movImgMemSize); hipMalloc((void**)&m_d_MovSize, 3 * sizeof(int)); hipMalloc((void**)&m_d_MovSpacing, 3 * sizeof(float)); // Copy arrays related to the moving image onto device array hipMemcpy(m_d_movImgArray, movImgArray, m_movImgMemSize, hipMemcpyHostToDevice); hipMemcpy(m_d_MovSize, MovSize, 3 * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(m_d_MovSpacing, MovSpacing, 3 * sizeof(float), hipMemcpyHostToDevice); //std::cout << "Siddon object Initialization: GPU memory prepared \n" << std::endl; //printf("ctor %p\n", this); // in constructors } /** * * Destructor clears everything left from the GPU memory * **/ SiddonGpu::~SiddonGpu() { hipFree(m_d_movImgArray); hipFree(m_d_MovSize); hipFree(m_d_MovSpacing); std::cout << "Siddon object destruction: GPU memory cleared \n" << std::endl; //printf("dtor %p\n", this); // in destructor } /** *-The function generate DRR must be called with the following variables : * * @param source : array of(transformed) source physical coordinates * @param DestArray : C - ordered 1D array of physical coordinates relative to 
the(transformed) output DRR image. * @param drrArray : output, 1D array for output values of projected CT densities * **/ void SiddonGpu::generateDRR(float *source, float *DestArray, float *drrArray) { hipError_t ierrAsync; hipError_t ierrSync; // declare pointer to device memory for output DRR array float *d_DestArray; float *d_source; float *d_drr_array; // allocate space on device hipMalloc((void**)&d_drr_array, m_DrrMemSize); hipMalloc((void**)&d_source, 3 * sizeof(float)); hipMalloc((void**)&d_DestArray, m_DestMemSize); // Copy source and destination to device hipMemcpy(d_DestArray, DestArray, m_DestMemSize, hipMemcpyHostToDevice); hipMemcpy(d_source, source, 3 * sizeof(float), hipMemcpyHostToDevice); //std::cout << "DRR generation: GPU memory prepared \n" << std::endl; // determine number of required blocks dim3 threads_per_block(m_NumThreadsPerBlock[0], m_NumThreadsPerBlock[1], 1); dim3 number_of_blocks((m_DRRsize[0] / threads_per_block.x) + 1, (m_DRRsize[1] / threads_per_block.y) + 1, 1); //// Query GPU device //hipDeviceProp_t prop; //hipGetDeviceProperties(&prop, 0); //std::cout << "Max threads per block " << prop.maxThreadsPerBlock << std::endl; //hipGetDeviceProperties(&prop, 0); //if (threads_per_block.x * threads_per_block.y * threads_per_block.z > prop.maxThreadsPerBlock) { // printf("Too many threads per block ... exiting\n"); // goto cleanup; //} //if (threads_per_block.x > prop.maxThreadsDim[0]) { // printf("Too many threads in x-direction ... exiting\n"); // goto cleanup; //} //if (threads_per_block.y > prop.maxThreadsDim[1]) { // printf("Too many threads in y-direction ... exiting\n"); // goto cleanup; //} //if (threads_per_block.z > prop.maxThreadsDim[2]) { // printf("Too many threads in z-direction ... exiting\n"); // goto cleanup; //} // launch kernel cuda_kernel << <number_of_blocks, threads_per_block >> >(d_drr_array, d_source, d_DestArray, m_DRRsize0, m_d_movImgArray, m_d_MovSize, m_d_MovSpacing, m_X0, m_Y0, m_Z0); // Check for errors in Kernel launch ierrSync = hipGetLastError(); ierrAsync = hipDeviceSynchronize(); // Wait for the GPU to finish if (ierrSync != hipSuccess) { printf("Cuda Sync error: %s\n", hipGetErrorString(ierrSync)); //goto cleanup; } if (ierrAsync != hipSuccess) { printf("Cuda Async error: %s\n", hipGetErrorString(ierrAsync)); //goto cleanup; } // Copy result to host array hipMemcpy(drrArray, d_drr_array, m_DrrMemSize, hipMemcpyDeviceToHost); // Clean up device DRR array cleanup: hipFree(d_drr_array); hipFree(d_source); hipFree(d_DestArray); //std::cout << "DRR generation: GPU memory cleared \n" << std::endl; return; }
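// ---------------------------------------------------------------------------
// Illustrative host-side usage sketch (not part of the original library): how
// the SiddonGpu class defined above is typically driven. All sizes, spacings
// and geometry values below are made up for illustration only; in practice
// they come from the loaded CT volume and the projection geometry.
// ---------------------------------------------------------------------------
#include <vector>

void example_generate_drr() {

	// Moving image (CT volume): dimensions, spacing and a dummy data buffer.
	int MovSize[3] = { 64, 64, 64 };
	float MovSpacing[3] = { 1.f, 1.f, 1.f };
	std::vector<float> movImg(static_cast<size_t>(MovSize[0]) * MovSize[1] * MovSize[2], 1.f);

	// Physical coordinate of the first plane of the volume along each axis,
	// matching the X0/Y0/Z0 arguments consumed by the kernel above.
	float X0 = -32.f, Y0 = -32.f, Z0 = -32.f;

	// Square output DRR and the CUDA block configuration.
	int DRRSize[3] = { 128, 128, 1 };
	int NumThreadsPerBlock[3] = { 16, 16, 1 };

	SiddonGpu projector(NumThreadsPerBlock, movImg.data(), MovSize, MovSpacing,
	                    X0, Y0, Z0, DRRSize);

	// Source position and one (x, y, z) destination triplet per DRR pixel,
	// C-ordered, e.g. produced by the surrounding registration framework.
	float source[3] = { 0.f, 0.f, 400.f };
	std::vector<float> dest(3 * static_cast<size_t>(DRRSize[0]) * DRRSize[1] * DRRSize[2], 0.f);
	std::vector<float> drr(static_cast<size_t>(DRRSize[0]) * DRRSize[1] * DRRSize[2], 0.f);

	projector.generateDRR(source, dest.data(), drr.data());
}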
103fdf7ee8e57602d5f67a8a762e495b2df3c0be.cu
/** * Fabio D'Isidoro, ETH Zurich, 08.08.2017 * * Implementation of a CUDA-based Cpp library for fast DRR generation with GPU acceleration * * Based both on the description found in the “Improved Algorithm” section in Jacob’s paper (1998) * https://www.researchgate.net/publication/2344985_A_Fast_Algorithm_to_Calculate_the_Exact_Radiological_Path_Through_a_Pixel_Or_Voxel_Space * and on the implementation suggested in Greef et al 2009 * https://www.ncbi.nlm.nih.gov/pubmed/19810482 * * Source file for the Class Siddon (see header for more information) */ #include "stdio.h" #include "siddon_class.cuh" #include <iostream> #include "cuda_runtime.h" #include "device_launch_parameters.h" __device__ const float epsilon = 2.22045e-016; // to compare double float values // auxiliary functions __device__ void get_dest(int idx, float *dest_array, float *dest) { dest[0] = dest_array[0 + 3 * idx]; dest[1] = dest_array[1 + 3 * idx]; dest[2] = dest_array[2 + 3 * idx]; } __device__ void compute_alpha_x(const float &X0, const float &spacing_x, const int &i, const float &source_x, const float &dest_x, float &alpha_x) { alpha_x = ((X0 + static_cast<float>(i)*spacing_x) - source_x) / (dest_x - source_x); } __device__ void compute_alpha_y(const float &Y0, const float &spacing_y, const int &j, const float &source_y, const float &dest_y, float &alpha_y) { alpha_y = ((Y0 + static_cast<float>(j)*spacing_y) - source_y) / (dest_y - source_y); } __device__ void compute_alpha_z(const float &Z0, const float &spacing_z, const int &k, const float &source_z, const float &dest_z, float &alpha_z) { alpha_z = ((Z0 + static_cast<float>(k)*spacing_z) - source_z) / (dest_z - source_z); } __device__ void compute_phi_x(const float &X0, const float &spacing_x, float &alpha, const float &source_x, const float &dest_x, float &phi_x) { phi_x = (source_x + alpha*(dest_x - source_x) - X0) / spacing_x; } __device__ void compute_phi_y(const float &Y0, const float &spacing_y, float &alpha, const float &source_y, const float &dest_y, float &phi_y) { phi_y = (source_y + alpha*(dest_y - source_y) - Y0) / spacing_y; } __device__ void compute_phi_z(const float &Z0, const float &spacing_z, float &alpha, const float &source_z, const float &dest_z, float &phi_z) { phi_z = (source_z + alpha*(dest_z - source_z) - Z0) / spacing_z; } __device__ void update_idx(unsigned int &i_v, unsigned int &j_v, unsigned int &k_v, const int &size_x, const int &size_y, int &arrayIdx) { arrayIdx = i_v + size_x * (j_v + size_y * k_v); } __global__ void cuda_kernel(float *DRRarray, float *source, float *DestArray, int DRRsize0, float *movImgArray, int *MovSize, float *MovSpacing, float X0, float Y0, float Z0) { // DRR image indeces int row = blockIdx.x * blockDim.x + threadIdx.x; int col = blockIdx.y * blockDim.y + threadIdx.y; // DRR array index int DRRidx = row + DRRsize0 * col; //printf("Thread index %i\n", DRRidx); if (DRRidx < DRRsize0 * DRRsize0) { // checks if thread index is within the length of the DRR array // --- declaration of variables for Siddon --- float alpha_min, alpha_max; float alpha_x_min, alpha_x_max, alpha_y_min, alpha_y_max, alpha_z_min, alpha_z_max; int i_min, i_max, j_min, j_max, k_min, k_max; // indeces corresponding to first and last intersected voxels float alpha_current; float alpha_x_next; float alpha_y_next; float alpha_z_next; float distance; int arrayIdx; int arrayIdx_old; unsigned int i_v, j_v, k_v; float alpha_first_pixel; float density_value = 0.; // --- define destination point based on DRR array index --- float dest[3]; 
get_dest(DRRidx, DestArray, dest); // --- source-to-destination distance --- distance = sqrtf((dest[0] - source[0])*(dest[0] - source[0]) + (dest[1] - source[1])*(dest[1] - source[1]) + (dest[2] - source[2])*(dest[2] - source[2])); float dx = MovSpacing[0] / fabsf(dest[0] - source[0]); float dy = MovSpacing[1] / fabsf(dest[1] - source[1]); float dz = MovSpacing[2] / fabsf(dest[2] - source[2]); // --- find alpha_min and alpha_max // initialize alpha_min and alpha_max alpha_min = 0.; alpha_max = 1.; // X if (fabsf(dest[0] - source[0]) > epsilon) { float alpha_x0 = (X0 - source[0]) / (dest[0] - source[0]); float alpha_xN; compute_alpha_x(X0, MovSpacing[0], MovSize[0], source[0], dest[0], alpha_xN); alpha_x_min = fminf(alpha_x0, alpha_xN); alpha_x_max = fmaxf(alpha_x0, alpha_xN); if (alpha_x_min > alpha_min) { alpha_min = alpha_x_min; }; if (alpha_x_max < alpha_max) { alpha_max = alpha_x_max; }; } // Y if (fabsf(dest[1] - source[1]) > epsilon) { float alpha_y0 = (Y0 - source[1]) / (dest[1] - source[1]); float alpha_yN; compute_alpha_y(Y0, MovSpacing[1], MovSize[1], source[1], dest[1], alpha_yN); alpha_y_min = fminf(alpha_y0, alpha_yN); alpha_y_max = fmaxf(alpha_y0, alpha_yN); if (alpha_y_min > alpha_min) { alpha_min = alpha_y_min; }; if (alpha_y_max < alpha_max) { alpha_max = alpha_y_max; }; } // Z if (fabsf(dest[2] - source[2]) > epsilon) { float alpha_z0 = (Z0 - source[2]) / (dest[2] - source[2]); float alpha_zN; compute_alpha_z(Z0, MovSpacing[2], MovSize[2], source[2], dest[2], alpha_zN); alpha_z_min = fminf(alpha_z0, alpha_zN); alpha_z_max = fmaxf(alpha_z0, alpha_zN); if (alpha_z_min > alpha_min) { alpha_min = alpha_z_min; }; if (alpha_z_max < alpha_max) { alpha_max = alpha_z_max; }; } //if (DRRidx == 0){ //printf("Alpha min = %f\n", alpha_min); //printf("Alpha max = %f\n", alpha_max); //} // --- initialize alpha --- alpha_current = alpha_min; if (alpha_min < alpha_max) { // compute i_min, i_max and initialize alpha_x_next if (dest[0] - source[0] > 0.) { // i_min if (fabsf(alpha_min - alpha_x_min) < epsilon) { i_min = 1; } else { float phi_x; compute_phi_x(X0, MovSpacing[0], alpha_min, source[0], dest[0], phi_x); i_min = ceil(phi_x); } // i_max if (fabsf(alpha_max - alpha_x_max) < epsilon) { i_max = MovSize[0] - 1; } else { float phi_x; compute_phi_x(X0, MovSpacing[0], alpha_max, source[0], dest[0], phi_x); i_max = floor(phi_x); } // initialize alpha_x_next compute_alpha_x(X0, MovSpacing[0], i_min, source[0], dest[0], alpha_x_next); } else { // i_max if (fabsf(alpha_min - alpha_x_min) < epsilon) { i_max = MovSize[0] - 1; } else { float phi_x; compute_phi_x(X0, MovSpacing[0], alpha_min, source[0], dest[0], phi_x); i_max = floor(phi_x); } // i_min if (fabsf(alpha_max - alpha_x_max) < epsilon) { i_min = 0; } else { float phi_x; compute_phi_x(X0, MovSpacing[0], alpha_max, source[0], dest[0], phi_x); i_min = ceil(phi_x); } // initialize alpha_x_next compute_alpha_x(X0, MovSpacing[0], i_max, source[0], dest[0], alpha_x_next); } // compute j_min, j_max and initialize alpha_y_next if (dest[1] - source[1] > 0.) 
{ // j_min if (fabsf(alpha_min - alpha_y_min) < epsilon) { j_min = 1; } else { float phi_y; compute_phi_y(Y0, MovSpacing[1], alpha_min, source[1], dest[1], phi_y); j_min = ceil(phi_y); } // j_max if (fabsf(alpha_max - alpha_y_max) < epsilon) { j_max = MovSize[1] - 1; } else { float phi_y; compute_phi_y(Y0, MovSpacing[1], alpha_max, source[1], dest[1], phi_y); j_max = floor(phi_y); } // initialize alpha_y_next compute_alpha_y(Y0, MovSpacing[1], j_min, source[1], dest[1], alpha_y_next); } else { // j_max if (fabsf(alpha_min - alpha_y_min) < epsilon) { j_max = MovSize[1] - 1; } else { float phi_y; compute_phi_y(Y0, MovSpacing[1], alpha_min, source[1], dest[1], phi_y); j_max = floor(phi_y); } // j_min if (fabsf(alpha_max - alpha_y_max) < epsilon) { j_min = 0; } else { float phi_y; compute_phi_y(Y0, MovSpacing[1], alpha_max, source[1], dest[1], phi_y); j_min = ceil(phi_y); } // initialize alpha_y_next compute_alpha_y(Y0, MovSpacing[1], j_max, source[1], dest[1], alpha_y_next); } // compute k_min, k_max and initialize alpha_z_next if (dest[2] - source[2] > 0.) { // k_min if (fabsf(alpha_min - alpha_z_min) < epsilon) { k_min = 1; } else { float phi_z; compute_phi_z(Z0, MovSpacing[2], alpha_min, source[2], dest[2], phi_z); k_min = ceil(phi_z); } // k_max if (fabsf(alpha_max - alpha_z_max) < epsilon) { k_max = MovSize[2] - 1; } else { float phi_z; compute_phi_z(Z0, MovSpacing[2], alpha_max, source[2], dest[2], phi_z); k_max = floor(phi_z); } // initialize alpha_z_next compute_alpha_z(Z0, MovSpacing[2], k_min, source[2], dest[2], alpha_z_next); } else { // k_max if (fabsf(alpha_min - alpha_z_min) < epsilon) { k_max = MovSize[2] - 1; } else { float phi_z; compute_phi_z(Z0, MovSpacing[2], alpha_min, source[2], dest[2], phi_z); k_max = floor(phi_z); } // k_min if (fabsf(alpha_max - alpha_z_max) < epsilon) { k_min = 0; } else { float phi_z; compute_phi_z(Z0, MovSpacing[2], alpha_max, source[2], dest[2], phi_z); k_min = ceil(phi_z); } // initialize alpha_z_next compute_alpha_z(Z0, MovSpacing[2], k_max, source[2], dest[2], alpha_z_next); } //if (DRRidx == 0) { // printf("i_min, i_max, Alpha_x_next = %d %d %f\n", i_min, i_max, alpha_x_next); // printf("j_min, j_max, Alpha_y_next = %d %d %f\n", j_min, j_max, alpha_y_next); // printf("k_min, k_max, Alpha_z_next = %d %d %f\n", k_min, k_max, alpha_z_next); //} // --- initialize first intersected pixel i_v, j_v, k_v --- if ((alpha_y_next < alpha_x_next) && (alpha_y_next < alpha_z_next)) { alpha_first_pixel = (alpha_y_next + alpha_min) / 2.; } else if (alpha_x_next < alpha_z_next) { alpha_first_pixel = (alpha_x_next + alpha_min) / 2.; } else { alpha_first_pixel = (alpha_z_next + alpha_min) / 2.; } float phi_x = 0.; float phi_y = 0.; float phi_z = 0.; compute_phi_x(X0, MovSpacing[0], alpha_first_pixel, source[0], dest[0], phi_x); i_v = floor(phi_x); compute_phi_y(Y0, MovSpacing[1], alpha_first_pixel, source[1], dest[1], phi_y); j_v = floor(phi_y); compute_phi_z(Z0, MovSpacing[2], alpha_first_pixel, source[2], dest[2], phi_z); k_v = floor(phi_z); // initialize array index of first intersected pixel arrayIdx = i_v + MovSize[0] * (j_v + MovSize[1] * k_v); arrayIdx_old = i_v + MovSize[0] * (j_v + MovSize[1] * k_v); //if (DRRidx == 0) { // printf("i_v, j_v, k_v = %d %d %d\n", i_v, j_v, k_v); // printf("arrayIdx, arrayIdx_old = %d %d\n", arrayIdx, arrayIdx_old); //} // iterator indeces int stop = (i_max - i_min + 1) + (j_max - j_min + 1) + (k_max - k_min + 1); int iter = 0; //while (alpha_current < 1. 
&& alpha_current < alpha_max) { while (iter < stop) { float l; // next intersection plane is y if ((alpha_y_next < alpha_x_next) && (alpha_y_next < alpha_z_next)) { //T alpha_mid = (alpha_current + alpha_y_next) / 2.; l = (alpha_y_next - alpha_current); alpha_current = alpha_y_next; // update alpha_y_next += dy; j_v += (dest[1] - source[1] > 0.) ? 1 : -1; } else if (alpha_x_next < alpha_z_next) { // next intersection plane is x //T alpha_mid = (alpha_current + alpha_x_next) / 2.; l = (alpha_x_next - alpha_current); alpha_current = alpha_x_next; // update alpha_x_next += dx; i_v += (dest[0] - source[0] > 0.) ? 1 : -1; } else { // next intersection plane is z //T alpha_mid = (alpha_current + alpha_z_next) / 2.; l = (alpha_z_next - alpha_current); alpha_current = alpha_z_next; // update alpha_z_next += dz; k_v += (dest[2] - source[2] > 0.) ? 1 : -1; } // update array index update_idx(i_v, j_v, k_v, MovSize[0], MovSize[1], arrayIdx); //if (arrayIdx < 0.) { // printf("arrayIdx negative! %i", arrayIdx); //} if (arrayIdx_old > 0.){ // update density value if (movImgArray[arrayIdx_old] != 0.) { density_value += movImgArray[arrayIdx_old] * l; //std::cout << density_value << std::endl; } } // update arrayIdx arrayIdx_old = arrayIdx; // update iter iter += 1; } // multiply by the distance density_value *= distance; //std::cout << density_value << std::endl; } // update density value array DRRarray[DRRidx] = density_value; } } /** * * Deafult constructor * **/ SiddonGpu::SiddonGpu() { } /** * * Overloaded constructor loads the CT scan (together with size and spacing) onto GPU memory * **/ SiddonGpu::SiddonGpu(int *NumThreadsPerBlock, float *movImgArray, int *MovSize, float *MovSpacing, float X0, float Y0, float Z0, int *DRRSize) { // ---- Allocate variable members ---- m_NumThreadsPerBlock[0] = NumThreadsPerBlock[0]; m_NumThreadsPerBlock[1] = NumThreadsPerBlock[1]; m_NumThreadsPerBlock[2] = NumThreadsPerBlock[2]; //m_MovSize[0] = MovSize[0]; //m_MovSize[1] = MovSize[1]; //m_MovSize[2] = MovSize[2]; m_X0 = X0; m_Y0 = Y0; m_Z0 = Z0; m_DRRsize[0] = DRRSize[0]; m_DRRsize[1] = DRRSize[1]; m_DRRsize[2] = DRRSize[2]; m_DRRsize0 = DRRSize[0]; m_movImgMemSize = MovSize[0] * MovSize[1] * MovSize[2] * sizeof(float); m_DestMemSize = (DRRSize[0] * DRRSize[1] * DRRSize[2] * 3) * sizeof(float); m_DrrMemSize = (DRRSize[0] * DRRSize[1] * DRRSize[2]) * sizeof(float); // memory for each output drr // allocate space for device copies cudaMalloc((void**)&m_d_movImgArray, m_movImgMemSize); cudaMalloc((void**)&m_d_MovSize, 3 * sizeof(int)); cudaMalloc((void**)&m_d_MovSpacing, 3 * sizeof(float)); // Copy arrays related to the moving image onto device array cudaMemcpy(m_d_movImgArray, movImgArray, m_movImgMemSize, cudaMemcpyHostToDevice); cudaMemcpy(m_d_MovSize, MovSize, 3 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(m_d_MovSpacing, MovSpacing, 3 * sizeof(float), cudaMemcpyHostToDevice); //std::cout << "Siddon object Initialization: GPU memory prepared \n" << std::endl; //printf("ctor %p\n", this); // in constructors } /** * * Destructor clears everything left from the GPU memory * **/ SiddonGpu::~SiddonGpu() { cudaFree(m_d_movImgArray); cudaFree(m_d_MovSize); cudaFree(m_d_MovSpacing); std::cout << "Siddon object destruction: GPU memory cleared \n" << std::endl; //printf("dtor %p\n", this); // in destructor } /** *-The function generate DRR must be called with the following variables : * * @param source : array of(transformed) source physical coordinates * @param DestArray : C - ordered 1D array of physical coordinates 
relative to the(transformed) output DRR image. * @param drrArray : output, 1D array for output values of projected CT densities * **/ void SiddonGpu::generateDRR(float *source, float *DestArray, float *drrArray) { cudaError_t ierrAsync; cudaError_t ierrSync; // declare pointer to device memory for output DRR array float *d_DestArray; float *d_source; float *d_drr_array; // allocate space on device cudaMalloc((void**)&d_drr_array, m_DrrMemSize); cudaMalloc((void**)&d_source, 3 * sizeof(float)); cudaMalloc((void**)&d_DestArray, m_DestMemSize); // Copy source and destination to device cudaMemcpy(d_DestArray, DestArray, m_DestMemSize, cudaMemcpyHostToDevice); cudaMemcpy(d_source, source, 3 * sizeof(float), cudaMemcpyHostToDevice); //std::cout << "DRR generation: GPU memory prepared \n" << std::endl; // determine number of required blocks dim3 threads_per_block(m_NumThreadsPerBlock[0], m_NumThreadsPerBlock[1], 1); dim3 number_of_blocks((m_DRRsize[0] / threads_per_block.x) + 1, (m_DRRsize[1] / threads_per_block.y) + 1, 1); //// Query GPU device //cudaDeviceProp prop; //cudaGetDeviceProperties(&prop, 0); //std::cout << "Max threads per block " << prop.maxThreadsPerBlock << std::endl; //cudaGetDeviceProperties(&prop, 0); //if (threads_per_block.x * threads_per_block.y * threads_per_block.z > prop.maxThreadsPerBlock) { // printf("Too many threads per block ... exiting\n"); // goto cleanup; //} //if (threads_per_block.x > prop.maxThreadsDim[0]) { // printf("Too many threads in x-direction ... exiting\n"); // goto cleanup; //} //if (threads_per_block.y > prop.maxThreadsDim[1]) { // printf("Too many threads in y-direction ... exiting\n"); // goto cleanup; //} //if (threads_per_block.z > prop.maxThreadsDim[2]) { // printf("Too many threads in z-direction ... exiting\n"); // goto cleanup; //} // launch kernel cuda_kernel << <number_of_blocks, threads_per_block >> >(d_drr_array, d_source, d_DestArray, m_DRRsize0, m_d_movImgArray, m_d_MovSize, m_d_MovSpacing, m_X0, m_Y0, m_Z0); // Check for errors in Kernel launch ierrSync = cudaGetLastError(); ierrAsync = cudaDeviceSynchronize(); // Wait for the GPU to finish if (ierrSync != cudaSuccess) { printf("Cuda Sync error: %s\n", cudaGetErrorString(ierrSync)); //goto cleanup; } if (ierrAsync != cudaSuccess) { printf("Cuda Async error: %s\n", cudaGetErrorString(ierrAsync)); //goto cleanup; } // Copy result to host array cudaMemcpy(drrArray, d_drr_array, m_DrrMemSize, cudaMemcpyDeviceToHost); // Clean up device DRR array cleanup: cudaFree(d_drr_array); cudaFree(d_source); cudaFree(d_DestArray); //std::cout << "DRR generation: GPU memory cleared \n" << std::endl; return; }
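// ---------------------------------------------------------------------------
// Illustrative CPU reference sketch (not part of the original library): a
// plain C++ re-implementation of the alpha_min / alpha_max ray-clipping step
// performed at the start of cuda_kernel above, usable as a host-side oracle
// when unit-testing the GPU path. Function and parameter names are chosen
// here for illustration only.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cmath>

// Computes the parametric entry/exit values of the ray source -> dest against
// the volume whose first planes sit at origin[] and whose last planes sit at
// origin[] + size[] * spacing[] (mirroring compute_alpha_x/y/z above).
// The ray actually crosses the volume only if alpha_min < alpha_max.
inline void ray_alpha_range_cpu(const float source[3], const float dest[3],
                                const float origin[3], const float spacing[3],
                                const int size[3],
                                float& alpha_min, float& alpha_max) {
	alpha_min = 0.f;
	alpha_max = 1.f;
	const float eps = 2.22045e-16f; // same tolerance as the kernel's epsilon
	for (int axis = 0; axis < 3; ++axis) {
		const float d = dest[axis] - source[axis];
		if (std::fabs(d) <= eps) continue; // ray parallel to this family of planes
		const float a_first = (origin[axis] - source[axis]) / d;
		const float a_last  = (origin[axis] + size[axis] * spacing[axis] - source[axis]) / d;
		alpha_min = std::max(alpha_min, std::min(a_first, a_last));
		alpha_max = std::min(alpha_max, std::max(a_first, a_last));
	}
}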
6de5034676637eb88adeeb7e6c6e5abfb03489d5.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <torch/torch.h> /* Includes, cuda */ #include <rocblas.h> #include <hip/hip_runtime.h> // constants for fused bias+relu kernel #define BIAS_RELU_FW_NTHREADS 128 // forward number of thread per block #define BIAS_RELU_BW_NTHREADS_X 32 // backward number of thread in feature dim #define BIAS_RELU_BW_NTHREADS_Y 16 // backward number of thread in batch dim #define BIAS_RELU_RED_PER_THREAD 16 // backward minimal reduction length per thread // move to a header later on #define ILP 4 template<typename T> __host__ __device__ __forceinline__ bool is_aligned(T* p){ return ((uint64_t)p) % (ILP*sizeof(T)) == 0; } template<typename T> __device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){ typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT; ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset]; } template<typename T> __device__ __forceinline__ void load_store(T* dst, volatile T* src, int dst_offset, int src_offset){ typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT; ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset]; } template<typename T> __device__ __forceinline__ void load_store(volatile T* dst, T* src, int dst_offset, int src_offset){ typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT; ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset]; } // Keep ReLU in float only. When using half, cast to float before calling. __device__ __inline__ float relu(float a) { float retf = max(a, 0.f); return (retf); } // Keep Sigmoid in float only. When using half, cast to float before calling. __device__ __inline__ float sigmoid(float a) { float retf = 1.f / (1.f + expf(-a)); return (retf); } // FP64 Wrapper around cublas GEMMEx hipblasStatus_t mlp_gemm( hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, float* alpha, const double* A, int lda, const double* B, int ldb, const float* beta, double* C, int ldc) { return hipblasGemmEx( handle, transa, transb, m, n, k, alpha, A, HIP_R_64F, lda, B, HIP_R_64F, ldb, beta, C, HIP_R_64F, ldc, HIP_R_64F, HIPBLAS_GEMM_DEFAULT); } // FP32 Wrapper around cublas GEMMEx hipblasStatus_t mlp_gemm( hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, float* alpha, const float* A, int lda, const float* B, int ldb, const float* beta, float* C, int ldc) { return hipblasGemmEx( handle, transa, transb, m, n, k, alpha, A, HIP_R_32F, lda, B, HIP_R_32F, ldb, beta, C, HIP_R_32F, ldc, HIP_R_32F, HIPBLAS_GEMM_DEFAULT); } // FP16 Tensor core wrapper around cublas GEMMEx hipblasStatus_t mlp_gemm( hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, float* alpha, const at::Half* A, int lda, const at::Half* B, int ldb, float* beta, at::Half* C, int ldc) { return hipblasGemmEx( handle, transa, transb, m, n, k, alpha, A, HIP_R_16F, lda, B, HIP_R_16F, ldb, beta, C, HIP_R_16F, ldc, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP); } // Bias ADD. Assume input X is [features x batch size], column major. // Bias is one 'features' long vector, with implicit broadcast. 
template <typename T> __global__ void biasAdd_fprop(T *X, T *b, uint batch_size, uint features) { T r_x[ILP]; T r_b[ILP]; if(is_aligned(X) && is_aligned(b) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { int row = tid % (features / ILP); load_store(r_x, X, 0 , tid); load_store(r_b, b, 0 , row); #pragma unroll for(int ii = 0; ii < ILP; ii++) { float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]); r_x[ii] = bias_sum; } load_store(X, r_x, tid , 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { int row = tid % features; r_x[ii] = X[idx]; r_b[ii] = b[row]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]); r_x[ii] = bias_sum; } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { X[idx] = r_x[ii]; } } } } } // Bias ADD + ReLU. Assume input X is [features x batch size], column major. // Activation support fuesed ReLU. Safe to call in-place. template <typename T> __global__ void biasAddRelu_fprop(T *X, T *b, uint batch_size, uint features) { T r_x[ILP]; T r_b[ILP]; if(is_aligned(X) && is_aligned(b) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { int row = tid % (features / ILP); load_store(r_x, X, 0 , tid); load_store(r_b, b, 0 , row); #pragma unroll for(int ii = 0; ii < ILP; ii++) { float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]); r_x[ii] = relu(bias_sum); } load_store(X, r_x, tid , 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { int row = tid % features; r_x[ii] = X[idx]; r_b[ii] = b[row]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]); r_x[ii] = relu(bias_sum); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { X[idx] = r_x[ii]; } } } } } // ReLU. Assume input X is [features x batch size], column major. // Safe to call in-place. 
template <typename T> __global__ void Relu_fprop(T *X, uint batch_size, uint features) { T r_x[ILP]; if(is_aligned(X) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { load_store(r_x, X, 0 , tid); #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_x[ii] = relu(static_cast<float>(r_x[ii])); } load_store(X, r_x, tid , 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { r_x[ii] = X[idx]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_x[ii] = relu(static_cast<float>(r_x[ii])); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { X[idx] = r_x[ii]; } } } } } // Sigmoid. Assume input X is [features x batch size], column major. // Safe to call in-place. template <typename T> __global__ void Sigmoid_fprop(T *X, uint batch_size, uint features) { T r_x[ILP]; if(is_aligned(X) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { load_store(r_x, X, 0 , tid); #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_x[ii] = sigmoid(static_cast<float>(r_x[ii])); } load_store(X, r_x, tid , 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { r_x[ii] = X[idx]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_x[ii] = sigmoid(static_cast<float>(r_x[ii])); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { X[idx] = r_x[ii]; } } } } } // ReLU. Assume input X is [features x batch size], column major. // Safe to call in-place. template <typename T> __global__ void Relu_bprop(T *dY, T *Y, uint batch_size, uint features, T *dX) { T r_dy[ILP]; T r_y[ILP]; if(is_aligned(dY) && is_aligned(Y) && is_aligned(dX) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { load_store(r_dy, dY, 0 , tid); load_store(r_y, Y, 0 , tid); #pragma unroll for(int ii=0;ii<ILP;ii++){ if ((float)r_y[ii] <= 0.f) r_dy[ii] = 0; } load_store(dX, r_dy, tid, 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { r_dy[ii] = dY[idx]; r_y[ii] = Y[idx]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { if ((float)r_y[ii] <= 0.f) r_dy[ii] = 0; } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { dX[idx] = r_dy[ii]; } } } } } // Sigmoid. Assume input X is [features x batch size], column major. // Safe to call in-place. 
template <typename T> __global__ void Sigmoid_bprop(T *dY, T *Y, uint batch_size, uint features, T *dX) { T r_dy[ILP]; T r_y[ILP]; if(is_aligned(dY) && is_aligned(Y) && is_aligned(dX) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { load_store(r_dy, dY, 0 , tid); load_store(r_y, Y, 0 , tid); #pragma unroll for(int ii=0;ii<ILP;ii++){ float grad_out = r_dy[ii]; float out = r_y[ii]; float grad_i = out * ( 1.f - out) * grad_out; r_dy[ii] = grad_i; } load_store(dX, r_dy, tid, 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { r_dy[ii] = dY[idx]; r_y[ii] = Y[idx]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { float grad_out = r_dy[ii]; float out = r_y[ii]; float grad_i = out * ( 1.f - out) * grad_out; r_dy[ii] = grad_i; } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { dX[idx] = r_dy[ii]; } } } } } // Compute grid size for pointwise backward kernel. // block_x/y is total elment being handled per block, not number of threads void get_biasAddRelu_bprop_grid_size( int yfeat, int batch_size, int block_x, int block_y, int* grid_x, int* grid_y) { *grid_x = (yfeat + block_x - 1) / block_x; // Get number of SMs for efficient reduction. int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; // can switch to occupancy calculation. use 4 below now for sm_70 int max_blocks_y = num_SMs * 4 / (*grid_x); // block_y should be from minimal work per thread int nRedSplits = (batch_size + block_y - 1) / block_y; // increase number of elem per thread redcution to not launch more than enough // kernel adjust work, so here we just launch max block *grid_y = ::min(nRedSplits, max_blocks_y); return; } // Addition done deterministically via a 2-pass approach. Each CTA writes out partial // sum, and the last CTA in grid Y dimension accumulates partials serially and writes to result. template <typename T, int UNROLL_FACTOR> __global__ void biasAdd_bprop( T* dY, int features, int batch_size, volatile float* intermediate, int* semaphores, T* db) { // The feature that this thread is responsible for int f = blockIdx.x * blockDim.x + threadIdx.x; // Compute the span this thread is responsible for // For this block int b_chunkSize = (batch_size + gridDim.y - 1) / gridDim.y; int b_nStart = blockIdx.y * b_chunkSize; int b_nSpan = min(batch_size, b_nStart + b_chunkSize) - b_nStart; // For this thread int chunkSize = (b_chunkSize + blockDim.y - 1) / blockDim.y; int nStart = threadIdx.y * chunkSize + b_nStart; int nSpan = min(b_nStart + b_nSpan, nStart + chunkSize) - nStart; volatile float* out = intermediate + blockIdx.y * features; // Flag to trigger last reduction. 
__shared__ bool isLastBlock; // we know block size for now __shared__ float smem[BIAS_RELU_BW_NTHREADS_X*BIAS_RELU_BW_NTHREADS_Y]; // Accumulate db in FP32 always float db_local = 0; if (f < features) { int nidx = 0; // Handle non-multiple of UNROLL_FACTOR residue for (; nidx < nSpan % UNROLL_FACTOR; nidx++) { int row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features + row; db_local += (float)dY[flat_idx]; } // Handle meat of work for (; (nidx + UNROLL_FACTOR - 1) < nSpan; nidx += UNROLL_FACTOR) { int row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features + row; #pragma unroll 4 for (int u = 0; u < UNROLL_FACTOR; u++) { db_local += (float)dY[flat_idx]; flat_idx += features; } } // naive block reduction on y-dim int linear_idx = threadIdx.y * blockDim.x + threadIdx.x; smem[linear_idx] = db_local; } __syncthreads(); if (f < features) { if(threadIdx.y == 0) { for(int yidx = 1; yidx < blockDim.y; yidx++){ db_local += smem[yidx * blockDim.x + threadIdx.x]; } // block result is in db_local now for all threadIdx.y == 0 // Write out partial result out[f] = db_local; } } __threadfence(); __syncthreads(); // Increment semaphore and check if this is the last CTA in the grid_y dimension. // Only thread (0,0) calls this if (threadIdx.x == 0 && threadIdx.y == 0 && f < features) { unsigned int sum_idx; sum_idx = atomicAdd(&(semaphores[blockIdx.x]), 1); isLastBlock = (sum_idx == (gridDim.y - 1)); } __syncthreads(); db_local = 0; // No block reduction for now, only thread (*,0) do grid reduction if (isLastBlock && f < features) { if(threadIdx.y == 0) { for (int n = 0; n < gridDim.y; n++) { int row, col; row = f; col = n; db_local += (float)(intermediate[col * features + row]); } db[f] = (T)db_local; } } } // Addition done deterministically via a 2-pass approach. Each CTA writes out partial // sum, and the last CTA in grid Y dimension accumulates partials serially and writes to result. template <typename T, int UNROLL_FACTOR> __global__ void biasAddRelu_bprop( T* Y, T* dY, int features, int batch_size, T* dX, volatile float* intermediate, int* semaphores, T* db) { // The feature that this thread is responsible for int f = blockIdx.x * blockDim.x + threadIdx.x; // Compute the span this thread is responsible for // For this block int b_chunkSize = (batch_size + gridDim.y - 1) / gridDim.y; int b_nStart = blockIdx.y * b_chunkSize; int b_nSpan = min(batch_size, b_nStart + b_chunkSize) - b_nStart; // For this thread int chunkSize = (b_chunkSize + blockDim.y - 1) / blockDim.y; int nStart = threadIdx.y * chunkSize + b_nStart; int nSpan = min(b_nStart + b_nSpan, nStart + chunkSize) - nStart; volatile float* out = intermediate + blockIdx.y * features; // Flag to trigger last reduction. 
__shared__ bool isLastBlock; // we know block size for now __shared__ float smem[BIAS_RELU_BW_NTHREADS_X*BIAS_RELU_BW_NTHREADS_Y]; // Accumulate db in FP32 always float db_local = 0; if (f < features) { int nidx = 0; // Handle non-multiple of UNROLL_FACTOR residue for (; nidx < nSpan % UNROLL_FACTOR; nidx++) { int row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features + row; T y_val = Y[flat_idx]; T dy_val = dY[flat_idx]; T dx_val; if ((float)y_val > 0.f) dx_val = dy_val; else dx_val = 0; dX[flat_idx] = dx_val; db_local += (float)dx_val; } // Handle meat of work for (; (nidx + UNROLL_FACTOR - 1) < nSpan; nidx += UNROLL_FACTOR) { int row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features + row; #pragma unroll 4 for (int u = 0; u < UNROLL_FACTOR; u++) { T y_val = Y[flat_idx]; T dy_val = dY[flat_idx]; T dx_val; if ((float)y_val > 0.f) dx_val = dy_val; else dx_val = 0; dX[flat_idx] = dx_val; db_local += (float)dx_val; flat_idx += features; } } // naive block reduction on y-dim int linear_idx = threadIdx.y * blockDim.x + threadIdx.x; smem[linear_idx] = db_local; } __syncthreads(); if (f < features) { if(threadIdx.y == 0) { for(int yidx = 1; yidx < blockDim.y; yidx++){ db_local += smem[yidx * blockDim.x + threadIdx.x]; } // block result is in db_local now for all threadIdx.y == 0 // Write out partial result out[f] = db_local; } } __threadfence(); __syncthreads(); // Increment semaphore and check if this is the last CTA in the grid_y dimension. // Only thread (0,0) calls this if (threadIdx.x == 0 && threadIdx.y == 0 && f < features) { unsigned int sum_idx; sum_idx = atomicAdd(&(semaphores[blockIdx.x]), 1); isLastBlock = (sum_idx == (gridDim.y - 1)); } __syncthreads(); db_local = 0; // No block reduction for now, only thread (*,0) do grid reduction if (isLastBlock && f < features) { if(threadIdx.y == 0) { for (int n = 0; n < gridDim.y; n++) { int row, col; row = f; col = n; db_local += (float)(intermediate[col * features + row]); } db[f] = (T)db_local; } } } // Addition done deterministically via a 2-pass approach. Each CTA writes out partial // sum, and the last CTA in grid Y dimension accumulates partials serially and writes to result. template <typename T, int UNROLL_FACTOR> __global__ void biasAddRelu_bprop_aligned( T* Y, T* dY, int features, int batch_size, T* dX, volatile float* intermediate, int* semaphores, T* db) { // The feature that this thread is responsible for int f = blockIdx.x * blockDim.x + threadIdx.x; // Compute the span this thread is responsible for // For this block int b_chunkSize = (batch_size + gridDim.y - 1) / gridDim.y; int b_nStart = blockIdx.y * b_chunkSize; int b_nSpan = min(batch_size, b_nStart + b_chunkSize) - b_nStart; // For this thread int chunkSize = (b_chunkSize + blockDim.y - 1) / blockDim.y; int nStart = threadIdx.y * chunkSize + b_nStart; int nSpan = min(b_nStart + b_nSpan, nStart + chunkSize) - nStart; volatile float* out = intermediate + blockIdx.y * features; // Flag to trigger last reduction. 
__shared__ bool isLastBlock; // Accumulate db in FP32 always float db_local[ILP]; T r_y[ILP]; T r_dy[ILP]; #pragma unroll for(int ii=0;ii<ILP;ii++){ db_local[ii] = 0.f; } // f always <= features in this case //if (f < features) { int nidx = 0; // Handle non-multiple of UNROLL_FACTOR residue for (; nidx < nSpan % UNROLL_FACTOR; nidx++) { int row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features / ILP + row; load_store(r_y, Y, 0, flat_idx); load_store(r_dy, dY, 0, flat_idx); #pragma unroll for(int ii=0;ii<ILP;ii++){ if ((float)r_y[ii] <= 0.f) r_dy[ii] = 0; db_local[ii] += (float)r_dy[ii]; } load_store(dX, r_dy, flat_idx, 0); } // Handle meat of work for (; (nidx + UNROLL_FACTOR - 1) < nSpan; nidx += UNROLL_FACTOR) { int row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features / ILP + row; // total threads in x == features/ILP #pragma unroll for (int u = 0; u < UNROLL_FACTOR; u++) { load_store(r_y, Y, 0, flat_idx); load_store(r_dy, dY, 0, flat_idx); #pragma unroll for(int ii=0;ii<ILP;ii++){ if ((float)r_y[ii] <= 0.f) r_dy[ii] = 0; db_local[ii] += (float)r_dy[ii]; } load_store(dX, r_dy, flat_idx, 0); flat_idx += features/ILP; } } // we know block size for now __shared__ float smem[BIAS_RELU_BW_NTHREADS_X*BIAS_RELU_BW_NTHREADS_Y*ILP]; // naive block reduction on y-dim int linear_idx = threadIdx.y * blockDim.x + threadIdx.x; float* smem_out = smem + ILP * linear_idx; #pragma unroll for(int ii=0;ii<ILP;ii++){ smem_out[ii] = db_local[ii]; // reuse local dy buffer } __syncthreads(); if(threadIdx.y == 0) { for(int yidx = 1; yidx < blockDim.y; yidx++){ float* smem_in = smem + ILP * (yidx * blockDim.x + threadIdx.x); #pragma unroll for(int ii=0;ii<ILP;ii++){ db_local[ii] += smem_in[ii]; // reuse local dy buffer } } // block result is in db_local now for all threadIdx.y == 0 // TODO: maybe not useful early exit here if(gridDim.y == 1) { #pragma unroll for(int ii=0;ii<ILP;ii++){ r_dy[ii] = db_local[ii]; // reuse local dy buffer } load_store(db, r_dy, f, 0); return; } // Write out partial result load_store(out, db_local, f, 0); } __threadfence(); __syncthreads(); // Increment semaphore and check if this is the last CTA in the grid_y dimension. // Only thread (0,0) calls this if (threadIdx.x == 0 && threadIdx.y == 0) { unsigned int sum_idx; sum_idx = atomicAdd(&(semaphores[blockIdx.x]), 1); isLastBlock = (sum_idx == (gridDim.y - 1)); } __syncthreads(); #pragma unroll for(int ii=0;ii<ILP;ii++){ db_local[ii] = 0.f; } float r_db[ILP]; // No block reduction for now, only thread (*,0) do grid reduction if (isLastBlock) { if(threadIdx.y == 0){ for (int n = 0; n < gridDim.y; n++) { int row, col; row = f; col = n; load_store(r_db, intermediate, 0, col * features / ILP + row); #pragma unroll for(int ii=0;ii<ILP;ii++){ db_local[ii] += r_db[ii]; } } #pragma unroll for(int ii=0;ii<ILP;ii++){ r_dy[ii] = db_local[ii]; // reuse local dy buffer } load_store(db, r_dy, f, 0); } } } // Lists where the num_layers-1 intermediate Y buffers start in reserved space on fprop, starting // offset 0. The last Y value is, of course, stored in the user provided output buffer. 
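// Worked example with hypothetical sizes: batch_size = 4 and output_features = {8, 16, 32}
// give y_start_offsets = {0, 0 + 4*8, 32 + 4*16} = {0, 32, 96}.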
void get_y_offsets( int batch_size, int num_layers, const int* output_features, int* y_start_offsets) { y_start_offsets[0] = 0; for (int i = 1; i < num_layers; i++) { y_start_offsets[i] = y_start_offsets[i - 1] + batch_size * output_features[i - 1]; } } // Returns the reserved space (in elements) needed for the MLP size_t get_mlp_reserved_space(int batch_size, int num_layers, const int* output_features) { size_t res_space = 0; // Need to store output of every intermediate MLP - size equal to output_features[i] * batch_size // for all 'i' in [0, num_layers-1) for (int l = 0; l < num_layers; l++) { res_space += output_features[l] * batch_size; } return res_space; } // Returns the size of all fprop activations combined size_t get_all_activations_size(int batch_size, int num_layers, const int* output_features) { size_t acts_size = 0; for (int l = 0; l < num_layers; l++) { acts_size += output_features[l] * batch_size; } return acts_size; } #if 0 // Returns the work space (in elements) needed for the MLP bprop. size_t get_mlp_bp_workspace (int batch_size, int num_layers, const int* output_features) { /* Workspace is partitioned as DY_GEMMs : DX_GEMMs */ size_t work_space = 0; // Store each intermediate dY explicitly. Need 2 dYs per MLP layer (one for o/p // of biasReLU_bp and one for o/p of dgrad GEMM). work_space += 2*get_all_activations_size(batch_size, num_layers, output_features); return work_space; } #endif // Scratch space needed for reductions in number of elements size_t get_reduction_scratch_space(int batch_size, int num_layers, const int* output_features) { size_t max_scratch_space = 0; // Loop over all layers to see which one needs the max scratch space for (int l = 0; l < num_layers; l++) { // need to find max(aligned, not_aligned) int tmp, res0, res1; int block_x = BIAS_RELU_BW_NTHREADS_X; int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y; get_biasAddRelu_bprop_grid_size( output_features[l], batch_size, block_x, block_y, &tmp, &res0); block_x = ILP * BIAS_RELU_BW_NTHREADS_X; get_biasAddRelu_bprop_grid_size( output_features[l], batch_size, block_x, block_y, &tmp, &res1); max_scratch_space = ::max(max_scratch_space, (size_t)(output_features[l] * res0)); max_scratch_space = ::max(max_scratch_space, (size_t)(output_features[l] * res1)); } return max_scratch_space; } // Buffer for semaphores size_t get_semaphores_size(int num_layers, const int* output_features) { // Upper bound on semaphores is one per feature for the layer // with the most features. int max_features = 0; for (int l = 0; l < num_layers; l++) { max_features = ::max(max_features, output_features[l]); } return (size_t)max_features; } // Returns the work space (in elements) needed for the MLP bprop. template <typename T> size_t get_mlp_bp_workspace_in_bytes(int batch_size, int num_layers, const int* output_features) { size_t work_space = 0; // Store each intermediate dY explicitly. Need 2 dYs per MLP layer (one for o/p // of biasReLU_bp and one for o/p of dgrad GEMM). 
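// Byte total below = 2 * (all activations) * sizeof(T) for the dY/dX GEMM buffers,
// plus float scratch for the db partial reductions, plus one int semaphore per feature
// of the widest layer.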
work_space += 2 * get_all_activations_size(batch_size, num_layers, output_features) * sizeof(T); work_space += get_reduction_scratch_space(batch_size, num_layers, output_features) * sizeof(float); work_space += get_semaphores_size(num_layers, output_features) * sizeof(int); return work_space; } // Returns pointers to each segment of the workspace template <typename T> void partition_mlp_bp_workspace( int batch_size, int num_layers, const int* output_features, void* work_space, T** dy_gemms, T** dx_gemms, float** db_scratch, int** semaphores) { /* Workspace is partitioned as DY_GEMMs : DX_GEMMs : DB_SCRATCH : SEMAPHORES */ // Start address where dy_gemm tensors are stored *dy_gemms = reinterpret_cast<T*>(work_space); // Start address where dx_gemm tensors are stored *dx_gemms = *dy_gemms + get_all_activations_size(batch_size, num_layers, output_features); // Start address where db intermediate tensors are stored *db_scratch = reinterpret_cast<float*>( *dx_gemms + get_all_activations_size(batch_size, num_layers, output_features)); // Start address of semaphores *semaphores = reinterpret_cast<int*>( *db_scratch + get_reduction_scratch_space(batch_size, num_layers, output_features)); return; } // Does a simple MLP fprop (GEMM+bias+ReLU). // Can handle num_layers number of layers, each with its own shape. Output of layer i is assumed // to be input of layer i+1. output_features, WPtr and BPtr are arrays of length num_layers, and // must be in the same order i.e. WPtr[i] and BPtr[i] are respectively the weight and bias of layer // 'i'. template <typename T> int mlp_fp( T* X, int input_features, int batch_size, T** WPtr, int num_layers, int* output_features, T** BPtr, T* Y, T* reserved_space, int use_bias, int activation) { T *weight, *input, *output, *bias; T *reserved_space_x, *reserved_space_y; reserved_space_x = NULL; reserved_space_y = reserved_space; // Get cublas handle from Pytorch hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); // Get the stream from cublas handle to reuse for biasReLU kernel. hipStream_t stream; hipblasGetStream(handle, &stream); for (int layer = 0; layer < num_layers; layer++) { weight = WPtr[layer]; input = (layer == 0) ? X : reserved_space_x; output = (layer == num_layers - 1) ? Y : reserved_space_y; if (use_bias) { bias = BPtr[layer]; } int ifeat = (layer == 0) ? 
input_features : output_features[layer - 1]; int ofeat = output_features[layer]; float one = 1.f; float zero = 0.f; hipblasStatus_t cublas_status; // Call GEMM: fprop is Y = W'X cublas_status = mlp_gemm( handle, HIPBLAS_OP_T, HIPBLAS_OP_N, ofeat, batch_size, ifeat, &one, weight, ifeat, input, ifeat, &zero, output, ofeat); if (cublas_status != HIPBLAS_STATUS_SUCCESS) { printf("GEMM fprop failed with %d\n", cublas_status); return 1; } const uint &input_size = ofeat; int num_blocks = 0; int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; // Call biasReLU if(use_bias == 1) { if (activation == 0) { // no activation hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, biasAdd_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); hipLaunchKernelGGL(( biasAdd_fprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, output, bias, batch_size, input_size); } else if (activation == 1) { // relu hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, biasAddRelu_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); hipLaunchKernelGGL(( biasAddRelu_fprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, output, bias, batch_size, input_size); } else if (activation == 2) { // sigmoid hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, biasAdd_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); hipLaunchKernelGGL(( biasAdd_fprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, output, bias, batch_size, input_size); hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); hipLaunchKernelGGL(( Sigmoid_fprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, output, batch_size, input_size); } } else { // don't need to do anything in case of no activation and no bias if (activation == 1) { // relu hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Relu_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); hipLaunchKernelGGL(( Relu_fprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, output, batch_size, input_size); } else if (activation == 2) { // sigmoid hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); hipLaunchKernelGGL(( Sigmoid_fprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, output, batch_size, input_size); } } // Set current output as next layer input reserved_space_x = reserved_space_y; // Set next layer output reserved_space_y += ofeat * batch_size; } return 0; } // Does a simple MLP bprop (GEMM+bias+ReLU). // Needs reserved space to come back exactly as it was populated in fprop. // Does dgrad and wgrad sequentially. template <typename T> int mlp_bp( T* X, T* Y, int input_features, int batch_size, T** WPtr, int num_layers, int* output_features, T* dY, T* reserved_space, T* work_space, T* dX, T** dwPtr, T** dbPtr, bool requires_grad, int use_bias, int activation) { T* weight; T *dweight, *dx, *dy, *dbias; T *x, *y; // Where the dx of the biasReLU (== dy of gemm) is stored. Can be thrown away // after bp call. T* dy_gemm_base; // Where the dx after GEMM is stored. T* dx_gemm_base; // Where partial reduction results are stored. float* db_scratch; // Semaphores for reduction. 
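// Backward-pass structure (descriptive note): layers are walked from last to first; for
// each layer the bias/activation backward produces dy_gemm and dbias, a dgrad GEMM then
// computes dx = W * dy_gemm, and a wgrad GEMM computes dW = x * dy_gemm^T.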
int* semaphores; partition_mlp_bp_workspace<T>( batch_size, num_layers, output_features, work_space, &dy_gemm_base, &dx_gemm_base, &db_scratch, &semaphores); size_t semaphore_size = get_semaphores_size(num_layers, output_features) * sizeof(int); // Get cublas handle from Pytorch hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); // Get the stream from cublas handle to reuse for biasReLU kernel. hipStream_t stream; hipblasGetStream(handle, &stream); int* y_offsets = (int*)malloc(num_layers * sizeof(int)); get_y_offsets(batch_size, num_layers, output_features, y_offsets); for (int layer = num_layers - 1; layer >= 0; layer--) { weight = WPtr[layer]; dweight = dwPtr[layer]; // x is read from reserved space x = (layer == 0) ? X : reserved_space + y_offsets[layer - 1]; // dx is written in workspace for all but layer==0 dx = (layer == 0) ? dX : dx_gemm_base + y_offsets[layer - 1]; // y is read from reserved space y = (layer == num_layers - 1) ? Y : reserved_space + y_offsets[layer]; // dx from layer+1 dy = (layer == num_layers - 1) ? dY : dx_gemm_base + y_offsets[layer]; // dy_gemm is written to and read immediately T* dy_gemm = dy_gemm_base + y_offsets[layer]; dbias = dbPtr[layer]; int xfeat = (layer == 0) ? input_features : output_features[layer - 1]; int yfeat = output_features[layer]; float one = 1.f; float zero = 0.f; if (use_bias == 1) { if (activation == 0) { // no acitvation // bgrad dim3 block(BIAS_RELU_BW_NTHREADS_X, BIAS_RELU_BW_NTHREADS_Y); int grid_x, grid_y; hipMemsetAsync(semaphores, 0, semaphore_size, stream); int block_x = BIAS_RELU_BW_NTHREADS_X; int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y; get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y); dim3 grid(grid_x, grid_y); hipLaunchKernelGGL(( biasAdd_bprop<T, 4>), dim3(grid), dim3(block), 0, stream, dy, yfeat, batch_size, db_scratch, semaphores, dbias); // bypass dgrad through reset pointer dy_gemm = dy; } else if (activation == 1) { // relu dim3 block(BIAS_RELU_BW_NTHREADS_X, BIAS_RELU_BW_NTHREADS_Y); int grid_x, grid_y; hipMemsetAsync(semaphores, 0, semaphore_size, stream); if(yfeat % (ILP * BIAS_RELU_BW_NTHREADS_X) == 0 && is_aligned(y) && is_aligned(dy) && is_aligned(dy_gemm) && is_aligned(dbias)){ int block_x = ILP * BIAS_RELU_BW_NTHREADS_X; int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y; get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y); dim3 grid(grid_x, grid_y); hipLaunchKernelGGL(( biasAddRelu_bprop_aligned<T, 4>), dim3(grid), dim3(block), 0, stream, y, dy, yfeat, batch_size, dy_gemm, db_scratch, semaphores, dbias); } else { int block_x = BIAS_RELU_BW_NTHREADS_X; int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y; get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y); dim3 grid(grid_x, grid_y); hipLaunchKernelGGL(( biasAddRelu_bprop<T, 4>), dim3(grid), dim3(block), 0, stream, y, dy, yfeat, batch_size, dy_gemm, db_scratch, semaphores, dbias); } } else if (activation == 2) { // sigmoid // activation backward int num_blocks = 0; int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_bprop<T>, BIAS_RELU_FW_NTHREADS, 0); hipLaunchKernelGGL(( Sigmoid_bprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, dy, y, batch_size, yfeat, dy_gemm); // bgrad, from dy_gemm dim3 block(BIAS_RELU_BW_NTHREADS_X, BIAS_RELU_BW_NTHREADS_Y); int grid_x, grid_y; 
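// Clear the semaphores on the same stream before launching the bias-gradient reduction
// so the last-CTA detection in biasAdd_bprop starts counting from zero again.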
hipMemsetAsync(semaphores, 0, semaphore_size, stream); int block_x = BIAS_RELU_BW_NTHREADS_X; int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y; get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y); dim3 grid(grid_x, grid_y); hipLaunchKernelGGL(( biasAdd_bprop<T, 4>), dim3(grid), dim3(block), 0, stream, dy_gemm, yfeat, batch_size, db_scratch, semaphores, dbias); } } else { // no bias below if (activation == 0) { // bypass dgrad through reset pointer dy_gemm = dy; } else if (activation == 1) { // relu int num_blocks = 0; int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Relu_bprop<T>, BIAS_RELU_FW_NTHREADS, 0); hipLaunchKernelGGL(( Relu_bprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, dy, y, batch_size, yfeat, dy_gemm); } else if (activation == 2) { // sigmoid int num_blocks = 0; int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; hipOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_bprop<T>, BIAS_RELU_FW_NTHREADS, 0); hipLaunchKernelGGL(( Sigmoid_bprop), dim3(num_SMs*num_blocks), dim3(BIAS_RELU_FW_NTHREADS), 0, stream, dy, y, batch_size, yfeat, dy_gemm); } } hipblasStatus_t cublas_status; // Call GEMM dgrad if (layer > 0 || requires_grad == 1) { cublas_status = mlp_gemm( handle, HIPBLAS_OP_N, HIPBLAS_OP_N, xfeat, batch_size, yfeat, &one, weight, xfeat, dy_gemm, yfeat, &zero, dx, xfeat); if (cublas_status != HIPBLAS_STATUS_SUCCESS) { printf("GEMM dgrad failed with %d\n", cublas_status); return 1; } } // Call GEMM wgrad cublas_status = mlp_gemm( handle, HIPBLAS_OP_N, HIPBLAS_OP_T, xfeat, yfeat, batch_size, &one, x, xfeat, dy_gemm, yfeat, &zero, dweight, xfeat); if (cublas_status != HIPBLAS_STATUS_SUCCESS) { printf("GEMM wgrad failed with %d\n", cublas_status); return 1; } } return 0; } // Instantiate for floating point types template int mlp_fp<float>( float* X, int input_features, int batch_size, float** WPtr, int num_layers, int* output_features, float** BPtr, float* Y, float* reserved_space, int use_bias, int activation); template int mlp_bp<float>( float* X, float* Y, int input_features, int batch_size, float** WPtr, int num_layers, int* output_features, float* dY, float* reserved_space, float* work_space, float* dX, float** dwPtr, float** dbPtr, bool requires_grad, int use_bias, int activation); template int mlp_fp<at::Half>( at::Half* X, int input_features, int batch_size, at::Half** WPtr, int num_layers, int* output_features, at::Half** BPtr, at::Half* Y, at::Half* reserved_space, int use_bias, int activation); template int mlp_bp<at::Half>( at::Half* X, at::Half* Y, int input_features, int batch_size, at::Half** WPtr, int num_layers, int* output_features, at::Half* dY, at::Half* reserved_space, at::Half* work_space, at::Half* dX, at::Half** dwPtr, at::Half** dbPtr, bool requires_grad, int use_bias, int activation); template int mlp_fp<double>( double* X, int input_features, int batch_size, double** WPtr, int num_layers, int* output_features, double** BPtr, double* Y, double* reserved_space, int use_bias, int activation); template int mlp_bp<double>( double* X, double* Y, int input_features, int batch_size, double** WPtr, int num_layers, int* output_features, double* dY, double* reserved_space, double* work_space, double* dX, double** dwPtr, double** dbPtr, bool requires_grad, int use_bias, int activation); template size_t get_mlp_bp_workspace_in_bytes<float>( int batch_size, int num_layers, 
const int* output_features); template size_t get_mlp_bp_workspace_in_bytes<at::Half>( int batch_size, int num_layers, const int* output_features); template size_t get_mlp_bp_workspace_in_bytes<double>( int batch_size, int num_layers, const int* output_features);
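A minimal caller-side sizing sketch for the routines above (illustrative only, not part of the original file: the function name, the layer widths, and the use of float precision are assumptions, and the tensor/pointer setup needed for mlp_fp / mlp_bp is omitted):

// Hypothetical usage sketch: size and allocate the fprop reserved space and the bprop
// workspace for an assumed 3-layer MLP. Assumes the functions above are visible in this
// translation unit (e.g., this snippet is appended to the file above).
#include <hip/hip_runtime.h>

void example_mlp_workspace_sizing(int batch_size) {
  int output_features[3] = {1024, 512, 10};  // assumed layer widths
  int num_layers = 3;

  // Elements needed to keep every layer's activations around for the backward pass.
  size_t reserved_elems = get_mlp_reserved_space(batch_size, num_layers, output_features);
  // Bytes needed for bprop scratch: dY/dX GEMM buffers, float db partials, int semaphores.
  size_t bp_bytes = get_mlp_bp_workspace_in_bytes<float>(batch_size, num_layers, output_features);

  float* reserved_space = NULL;
  float* work_space = NULL;
  hipMalloc((void**)&reserved_space, reserved_elems * sizeof(float));
  hipMalloc((void**)&work_space, bp_bytes);

  // ... set up X, Y, dY, dX, WPtr, BPtr, dwPtr, dbPtr as device pointers, then call
  // mlp_fp<float>(...) followed by mlp_bp<float>(...), passing reserved_space and work_space.

  hipFree(work_space);
  hipFree(reserved_space);
}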
6de5034676637eb88adeeb7e6c6e5abfb03489d5.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <torch/torch.h> /* Includes, cuda */ #include <cublas_v2.h> #include <cuda_runtime.h> // constants for fused bias+relu kernel #define BIAS_RELU_FW_NTHREADS 128 // forward number of thread per block #define BIAS_RELU_BW_NTHREADS_X 32 // backward number of thread in feature dim #define BIAS_RELU_BW_NTHREADS_Y 16 // backward number of thread in batch dim #define BIAS_RELU_RED_PER_THREAD 16 // backward minimal reduction length per thread // move to a header later on #define ILP 4 template<typename T> __host__ __device__ __forceinline__ bool is_aligned(T* p){ return ((uint64_t)p) % (ILP*sizeof(T)) == 0; } template<typename T> __device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){ typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT; ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset]; } template<typename T> __device__ __forceinline__ void load_store(T* dst, volatile T* src, int dst_offset, int src_offset){ typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT; ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset]; } template<typename T> __device__ __forceinline__ void load_store(volatile T* dst, T* src, int dst_offset, int src_offset){ typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT; ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset]; } // Keep ReLU in float only. When using half, cast to float before calling. __device__ __inline__ float relu(float a) { float retf = max(a, 0.f); return (retf); } // Keep Sigmoid in float only. When using half, cast to float before calling. __device__ __inline__ float sigmoid(float a) { float retf = 1.f / (1.f + expf(-a)); return (retf); } // FP64 Wrapper around cublas GEMMEx cublasStatus_t mlp_gemm( cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, float* alpha, const double* A, int lda, const double* B, int ldb, const float* beta, double* C, int ldc) { return cublasGemmEx( handle, transa, transb, m, n, k, alpha, A, CUDA_R_64F, lda, B, CUDA_R_64F, ldb, beta, C, CUDA_R_64F, ldc, CUDA_R_64F, CUBLAS_GEMM_DEFAULT); } // FP32 Wrapper around cublas GEMMEx cublasStatus_t mlp_gemm( cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, float* alpha, const float* A, int lda, const float* B, int ldb, const float* beta, float* C, int ldc) { return cublasGemmEx( handle, transa, transb, m, n, k, alpha, A, CUDA_R_32F, lda, B, CUDA_R_32F, ldb, beta, C, CUDA_R_32F, ldc, CUDA_R_32F, CUBLAS_GEMM_DEFAULT); } // FP16 Tensor core wrapper around cublas GEMMEx cublasStatus_t mlp_gemm( cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, float* alpha, const at::Half* A, int lda, const at::Half* B, int ldb, float* beta, at::Half* C, int ldc) { return cublasGemmEx( handle, transa, transb, m, n, k, alpha, A, CUDA_R_16F, lda, B, CUDA_R_16F, ldb, beta, C, CUDA_R_16F, ldc, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP); } // Bias ADD. Assume input X is [features x batch size], column major. // Bias is one 'features' long vector, with implicit broadcast. 
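// Indexing note: with X stored column major as [features x batch], element (f, n) lives at
// flat index n * features + f. The fast path in the kernels below requires aligned pointers
// and features % ILP == 0 so each thread can move ILP consecutive feature values with one
// packed load_store; otherwise the scalar fallback path with per-element bounds checks runs.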
template <typename T> __global__ void biasAdd_fprop(T *X, T *b, uint batch_size, uint features) { T r_x[ILP]; T r_b[ILP]; if(is_aligned(X) && is_aligned(b) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { int row = tid % (features / ILP); load_store(r_x, X, 0 , tid); load_store(r_b, b, 0 , row); #pragma unroll for(int ii = 0; ii < ILP; ii++) { float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]); r_x[ii] = bias_sum; } load_store(X, r_x, tid , 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { int row = tid % features; r_x[ii] = X[idx]; r_b[ii] = b[row]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]); r_x[ii] = bias_sum; } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { X[idx] = r_x[ii]; } } } } } // Bias ADD + ReLU. Assume input X is [features x batch size], column major. // Activation support fuesed ReLU. Safe to call in-place. template <typename T> __global__ void biasAddRelu_fprop(T *X, T *b, uint batch_size, uint features) { T r_x[ILP]; T r_b[ILP]; if(is_aligned(X) && is_aligned(b) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { int row = tid % (features / ILP); load_store(r_x, X, 0 , tid); load_store(r_b, b, 0 , row); #pragma unroll for(int ii = 0; ii < ILP; ii++) { float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]); r_x[ii] = relu(bias_sum); } load_store(X, r_x, tid , 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { int row = tid % features; r_x[ii] = X[idx]; r_b[ii] = b[row]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { float bias_sum = static_cast<float>(r_x[ii]) + static_cast<float>(r_b[ii]); r_x[ii] = relu(bias_sum); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { X[idx] = r_x[ii]; } } } } } // ReLU. Assume input X is [features x batch size], column major. // Safe to call in-place. 
template <typename T> __global__ void Relu_fprop(T *X, uint batch_size, uint features) { T r_x[ILP]; if(is_aligned(X) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { load_store(r_x, X, 0 , tid); #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_x[ii] = relu(static_cast<float>(r_x[ii])); } load_store(X, r_x, tid , 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { r_x[ii] = X[idx]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_x[ii] = relu(static_cast<float>(r_x[ii])); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { X[idx] = r_x[ii]; } } } } } // Sigmoid. Assume input X is [features x batch size], column major. // Safe to call in-place. template <typename T> __global__ void Sigmoid_fprop(T *X, uint batch_size, uint features) { T r_x[ILP]; if(is_aligned(X) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { load_store(r_x, X, 0 , tid); #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_x[ii] = sigmoid(static_cast<float>(r_x[ii])); } load_store(X, r_x, tid , 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { r_x[ii] = X[idx]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_x[ii] = sigmoid(static_cast<float>(r_x[ii])); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { X[idx] = r_x[ii]; } } } } } // ReLU. Assume input X is [features x batch size], column major. // Safe to call in-place. template <typename T> __global__ void Relu_bprop(T *dY, T *Y, uint batch_size, uint features, T *dX) { T r_dy[ILP]; T r_y[ILP]; if(is_aligned(dY) && is_aligned(Y) && is_aligned(dX) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { load_store(r_dy, dY, 0 , tid); load_store(r_y, Y, 0 , tid); #pragma unroll for(int ii=0;ii<ILP;ii++){ if ((float)r_y[ii] <= 0.f) r_dy[ii] = 0; } load_store(dX, r_dy, tid, 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { r_dy[ii] = dY[idx]; r_y[ii] = Y[idx]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { if ((float)r_y[ii] <= 0.f) r_dy[ii] = 0; } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { dX[idx] = r_dy[ii]; } } } } } // Sigmoid. Assume input X is [features x batch size], column major. // Safe to call in-place. 
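// Backward of the sigmoid: dX = dY * Y * (1 - Y), computed from the saved forward output Y;
// like the forward kernels it is safe to run in-place (dX may alias dY).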
template <typename T> __global__ void Sigmoid_bprop(T *dY, T *Y, uint batch_size, uint features, T *dX) { T r_dy[ILP]; T r_y[ILP]; if(is_aligned(dY) && is_aligned(Y) && is_aligned(dX) && features % ILP ==0) { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid*ILP < features * batch_size; tid += blockDim.x * gridDim.x) { load_store(r_dy, dY, 0 , tid); load_store(r_y, Y, 0 , tid); #pragma unroll for(int ii=0;ii<ILP;ii++){ float grad_out = r_dy[ii]; float out = r_y[ii]; float grad_i = out * ( 1.f - out) * grad_out; r_dy[ii] = grad_i; } load_store(dX, r_dy, tid, 0); } } else { int tid = blockIdx.x * blockDim.x + threadIdx.x; for (; tid < features * batch_size; tid += ILP * blockDim.x * gridDim.x) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { r_dy[ii] = dY[idx]; r_y[ii] = Y[idx]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { float grad_out = r_dy[ii]; float out = r_y[ii]; float grad_i = out * ( 1.f - out) * grad_out; r_dy[ii] = grad_i; } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int idx = tid + ii * blockDim.x * gridDim.x; if(idx < features * batch_size) { dX[idx] = r_dy[ii]; } } } } } // Compute grid size for pointwise backward kernel. // block_x/y is total elment being handled per block, not number of threads void get_biasAddRelu_bprop_grid_size( int yfeat, int batch_size, int block_x, int block_y, int* grid_x, int* grid_y) { *grid_x = (yfeat + block_x - 1) / block_x; // Get number of SMs for efficient reduction. int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; // can switch to occupancy calculation. use 4 below now for sm_70 int max_blocks_y = num_SMs * 4 / (*grid_x); // block_y should be from minimal work per thread int nRedSplits = (batch_size + block_y - 1) / block_y; // increase number of elem per thread redcution to not launch more than enough // kernel adjust work, so here we just launch max block *grid_y = std::min(nRedSplits, max_blocks_y); return; } // Addition done deterministically via a 2-pass approach. Each CTA writes out partial // sum, and the last CTA in grid Y dimension accumulates partials serially and writes to result. template <typename T, int UNROLL_FACTOR> __global__ void biasAdd_bprop( T* dY, int features, int batch_size, volatile float* intermediate, int* semaphores, T* db) { // The feature that this thread is responsible for int f = blockIdx.x * blockDim.x + threadIdx.x; // Compute the span this thread is responsible for // For this block int b_chunkSize = (batch_size + gridDim.y - 1) / gridDim.y; int b_nStart = blockIdx.y * b_chunkSize; int b_nSpan = min(batch_size, b_nStart + b_chunkSize) - b_nStart; // For this thread int chunkSize = (b_chunkSize + blockDim.y - 1) / blockDim.y; int nStart = threadIdx.y * chunkSize + b_nStart; int nSpan = min(b_nStart + b_nSpan, nStart + chunkSize) - nStart; volatile float* out = intermediate + blockIdx.y * features; // Flag to trigger last reduction. 
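// Determinism note: every CTA writes its per-feature partial sum to `intermediate`, makes
// it visible with __threadfence(), and bumps semaphores[blockIdx.x]; the CTA that observes
// the count reach gridDim.y - 1 adds the partials in a fixed serial order, so the reduction
// is bit-reproducible from run to run.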
__shared__ bool isLastBlock; // we know block size for now __shared__ float smem[BIAS_RELU_BW_NTHREADS_X*BIAS_RELU_BW_NTHREADS_Y]; // Accumulate db in FP32 always float db_local = 0; if (f < features) { int nidx = 0; // Handle non-multiple of UNROLL_FACTOR residue for (; nidx < nSpan % UNROLL_FACTOR; nidx++) { int row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features + row; db_local += (float)dY[flat_idx]; } // Handle meat of work for (; (nidx + UNROLL_FACTOR - 1) < nSpan; nidx += UNROLL_FACTOR) { int row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features + row; #pragma unroll 4 for (int u = 0; u < UNROLL_FACTOR; u++) { db_local += (float)dY[flat_idx]; flat_idx += features; } } // naive block reduction on y-dim int linear_idx = threadIdx.y * blockDim.x + threadIdx.x; smem[linear_idx] = db_local; } __syncthreads(); if (f < features) { if(threadIdx.y == 0) { for(int yidx = 1; yidx < blockDim.y; yidx++){ db_local += smem[yidx * blockDim.x + threadIdx.x]; } // block result is in db_local now for all threadIdx.y == 0 // Write out partial result out[f] = db_local; } } __threadfence(); __syncthreads(); // Increment semaphore and check if this is the last CTA in the grid_y dimension. // Only thread (0,0) calls this if (threadIdx.x == 0 && threadIdx.y == 0 && f < features) { unsigned int sum_idx; sum_idx = atomicAdd(&(semaphores[blockIdx.x]), 1); isLastBlock = (sum_idx == (gridDim.y - 1)); } __syncthreads(); db_local = 0; // No block reduction for now, only thread (*,0) do grid reduction if (isLastBlock && f < features) { if(threadIdx.y == 0) { for (int n = 0; n < gridDim.y; n++) { int row, col; row = f; col = n; db_local += (float)(intermediate[col * features + row]); } db[f] = (T)db_local; } } } // Addition done deterministically via a 2-pass approach. Each CTA writes out partial // sum, and the last CTA in grid Y dimension accumulates partials serially and writes to result. template <typename T, int UNROLL_FACTOR> __global__ void biasAddRelu_bprop( T* Y, T* dY, int features, int batch_size, T* dX, volatile float* intermediate, int* semaphores, T* db) { // The feature that this thread is responsible for int f = blockIdx.x * blockDim.x + threadIdx.x; // Compute the span this thread is responsible for // For this block int b_chunkSize = (batch_size + gridDim.y - 1) / gridDim.y; int b_nStart = blockIdx.y * b_chunkSize; int b_nSpan = min(batch_size, b_nStart + b_chunkSize) - b_nStart; // For this thread int chunkSize = (b_chunkSize + blockDim.y - 1) / blockDim.y; int nStart = threadIdx.y * chunkSize + b_nStart; int nSpan = min(b_nStart + b_nSpan, nStart + chunkSize) - nStart; volatile float* out = intermediate + blockIdx.y * features; // Flag to trigger last reduction. 
__shared__ bool isLastBlock; // we know block size for now __shared__ float smem[BIAS_RELU_BW_NTHREADS_X*BIAS_RELU_BW_NTHREADS_Y]; // Accumulate db in FP32 always float db_local = 0; if (f < features) { int nidx = 0; // Handle non-multiple of UNROLL_FACTOR residue for (; nidx < nSpan % UNROLL_FACTOR; nidx++) { int row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features + row; T y_val = Y[flat_idx]; T dy_val = dY[flat_idx]; T dx_val; if ((float)y_val > 0.f) dx_val = dy_val; else dx_val = 0; dX[flat_idx] = dx_val; db_local += (float)dx_val; } // Handle meat of work for (; (nidx + UNROLL_FACTOR - 1) < nSpan; nidx += UNROLL_FACTOR) { int row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features + row; #pragma unroll 4 for (int u = 0; u < UNROLL_FACTOR; u++) { T y_val = Y[flat_idx]; T dy_val = dY[flat_idx]; T dx_val; if ((float)y_val > 0.f) dx_val = dy_val; else dx_val = 0; dX[flat_idx] = dx_val; db_local += (float)dx_val; flat_idx += features; } } // naive block reduction on y-dim int linear_idx = threadIdx.y * blockDim.x + threadIdx.x; smem[linear_idx] = db_local; } __syncthreads(); if (f < features) { if(threadIdx.y == 0) { for(int yidx = 1; yidx < blockDim.y; yidx++){ db_local += smem[yidx * blockDim.x + threadIdx.x]; } // block result is in db_local now for all threadIdx.y == 0 // Write out partial result out[f] = db_local; } } __threadfence(); __syncthreads(); // Increment semaphore and check if this is the last CTA in the grid_y dimension. // Only thread (0,0) calls this if (threadIdx.x == 0 && threadIdx.y == 0 && f < features) { unsigned int sum_idx; sum_idx = atomicAdd(&(semaphores[blockIdx.x]), 1); isLastBlock = (sum_idx == (gridDim.y - 1)); } __syncthreads(); db_local = 0; // No block reduction for now, only thread (*,0) do grid reduction if (isLastBlock && f < features) { if(threadIdx.y == 0) { for (int n = 0; n < gridDim.y; n++) { int row, col; row = f; col = n; db_local += (float)(intermediate[col * features + row]); } db[f] = (T)db_local; } } } // Addition done deterministically via a 2-pass approach. Each CTA writes out partial // sum, and the last CTA in grid Y dimension accumulates partials serially and writes to result. template <typename T, int UNROLL_FACTOR> __global__ void biasAddRelu_bprop_aligned( T* Y, T* dY, int features, int batch_size, T* dX, volatile float* intermediate, int* semaphores, T* db) { // The feature that this thread is responsible for int f = blockIdx.x * blockDim.x + threadIdx.x; // Compute the span this thread is responsible for // For this block int b_chunkSize = (batch_size + gridDim.y - 1) / gridDim.y; int b_nStart = blockIdx.y * b_chunkSize; int b_nSpan = min(batch_size, b_nStart + b_chunkSize) - b_nStart; // For this thread int chunkSize = (b_chunkSize + blockDim.y - 1) / blockDim.y; int nStart = threadIdx.y * chunkSize + b_nStart; int nSpan = min(b_nStart + b_nSpan, nStart + chunkSize) - nStart; volatile float* out = intermediate + blockIdx.y * features; // Flag to trigger last reduction. 
__shared__ bool isLastBlock; // Accumulate db in FP32 always float db_local[ILP]; T r_y[ILP]; T r_dy[ILP]; #pragma unroll for(int ii=0;ii<ILP;ii++){ db_local[ii] = 0.f; } // f always <= features in this case //if (f < features) { int nidx = 0; // Handle non-multiple of UNROLL_FACTOR residue for (; nidx < nSpan % UNROLL_FACTOR; nidx++) { int row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features / ILP + row; load_store(r_y, Y, 0, flat_idx); load_store(r_dy, dY, 0, flat_idx); #pragma unroll for(int ii=0;ii<ILP;ii++){ if ((float)r_y[ii] <= 0.f) r_dy[ii] = 0; db_local[ii] += (float)r_dy[ii]; } load_store(dX, r_dy, flat_idx, 0); } // Handle meat of work for (; (nidx + UNROLL_FACTOR - 1) < nSpan; nidx += UNROLL_FACTOR) { int row, col, flat_idx; row = f; col = nStart + nidx; flat_idx = col * features / ILP + row; // total threads in x == features/ILP #pragma unroll for (int u = 0; u < UNROLL_FACTOR; u++) { load_store(r_y, Y, 0, flat_idx); load_store(r_dy, dY, 0, flat_idx); #pragma unroll for(int ii=0;ii<ILP;ii++){ if ((float)r_y[ii] <= 0.f) r_dy[ii] = 0; db_local[ii] += (float)r_dy[ii]; } load_store(dX, r_dy, flat_idx, 0); flat_idx += features/ILP; } } // we know block size for now __shared__ float smem[BIAS_RELU_BW_NTHREADS_X*BIAS_RELU_BW_NTHREADS_Y*ILP]; // naive block reduction on y-dim int linear_idx = threadIdx.y * blockDim.x + threadIdx.x; float* smem_out = smem + ILP * linear_idx; #pragma unroll for(int ii=0;ii<ILP;ii++){ smem_out[ii] = db_local[ii]; // reuse local dy buffer } __syncthreads(); if(threadIdx.y == 0) { for(int yidx = 1; yidx < blockDim.y; yidx++){ float* smem_in = smem + ILP * (yidx * blockDim.x + threadIdx.x); #pragma unroll for(int ii=0;ii<ILP;ii++){ db_local[ii] += smem_in[ii]; // reuse local dy buffer } } // block result is in db_local now for all threadIdx.y == 0 // TODO: maybe not useful early exit here if(gridDim.y == 1) { #pragma unroll for(int ii=0;ii<ILP;ii++){ r_dy[ii] = db_local[ii]; // reuse local dy buffer } load_store(db, r_dy, f, 0); return; } // Write out partial result load_store(out, db_local, f, 0); } __threadfence(); __syncthreads(); // Increment semaphore and check if this is the last CTA in the grid_y dimension. // Only thread (0,0) calls this if (threadIdx.x == 0 && threadIdx.y == 0) { unsigned int sum_idx; sum_idx = atomicAdd(&(semaphores[blockIdx.x]), 1); isLastBlock = (sum_idx == (gridDim.y - 1)); } __syncthreads(); #pragma unroll for(int ii=0;ii<ILP;ii++){ db_local[ii] = 0.f; } float r_db[ILP]; // No block reduction for now, only thread (*,0) do grid reduction if (isLastBlock) { if(threadIdx.y == 0){ for (int n = 0; n < gridDim.y; n++) { int row, col; row = f; col = n; load_store(r_db, intermediate, 0, col * features / ILP + row); #pragma unroll for(int ii=0;ii<ILP;ii++){ db_local[ii] += r_db[ii]; } } #pragma unroll for(int ii=0;ii<ILP;ii++){ r_dy[ii] = db_local[ii]; // reuse local dy buffer } load_store(db, r_dy, f, 0); } } } // Lists where the num_layers-1 intermediate Y buffers start in reserved space on fprop, starting // offset 0. The last Y value is, of course, stored in the user provided output buffer. 
void get_y_offsets( int batch_size, int num_layers, const int* output_features, int* y_start_offsets) { y_start_offsets[0] = 0; for (int i = 1; i < num_layers; i++) { y_start_offsets[i] = y_start_offsets[i - 1] + batch_size * output_features[i - 1]; } } // Returns the reserved space (in elements) needed for the MLP size_t get_mlp_reserved_space(int batch_size, int num_layers, const int* output_features) { size_t res_space = 0; // Need to store output of every intermediate MLP - size equal to output_features[i] * batch_size // for all 'i' in [0, num_layers-1) for (int l = 0; l < num_layers; l++) { res_space += output_features[l] * batch_size; } return res_space; } // Returns the size of all fprop activations combined size_t get_all_activations_size(int batch_size, int num_layers, const int* output_features) { size_t acts_size = 0; for (int l = 0; l < num_layers; l++) { acts_size += output_features[l] * batch_size; } return acts_size; } #if 0 // Returns the work space (in elements) needed for the MLP bprop. size_t get_mlp_bp_workspace (int batch_size, int num_layers, const int* output_features) { /* Workspace is partitioned as DY_GEMMs : DX_GEMMs */ size_t work_space = 0; // Store each intermediate dY explicitly. Need 2 dYs per MLP layer (one for o/p // of biasReLU_bp and one for o/p of dgrad GEMM). work_space += 2*get_all_activations_size(batch_size, num_layers, output_features); return work_space; } #endif // Scratch space needed for reductions in number of elements size_t get_reduction_scratch_space(int batch_size, int num_layers, const int* output_features) { size_t max_scratch_space = 0; // Loop over all layers to see which one needs the max scratch space for (int l = 0; l < num_layers; l++) { // need to find max(aligned, not_aligned) int tmp, res0, res1; int block_x = BIAS_RELU_BW_NTHREADS_X; int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y; get_biasAddRelu_bprop_grid_size( output_features[l], batch_size, block_x, block_y, &tmp, &res0); block_x = ILP * BIAS_RELU_BW_NTHREADS_X; get_biasAddRelu_bprop_grid_size( output_features[l], batch_size, block_x, block_y, &tmp, &res1); max_scratch_space = std::max(max_scratch_space, (size_t)(output_features[l] * res0)); max_scratch_space = std::max(max_scratch_space, (size_t)(output_features[l] * res1)); } return max_scratch_space; } // Buffer for semaphores size_t get_semaphores_size(int num_layers, const int* output_features) { // Upper bound on semaphores is one per feature for the layer // with the most features. int max_features = 0; for (int l = 0; l < num_layers; l++) { max_features = std::max(max_features, output_features[l]); } return (size_t)max_features; } // Returns the work space (in elements) needed for the MLP bprop. template <typename T> size_t get_mlp_bp_workspace_in_bytes(int batch_size, int num_layers, const int* output_features) { size_t work_space = 0; // Store each intermediate dY explicitly. Need 2 dYs per MLP layer (one for o/p // of biasReLU_bp and one for o/p of dgrad GEMM). 
work_space += 2 * get_all_activations_size(batch_size, num_layers, output_features) * sizeof(T); work_space += get_reduction_scratch_space(batch_size, num_layers, output_features) * sizeof(float); work_space += get_semaphores_size(num_layers, output_features) * sizeof(int); return work_space; } // Returns pointers to each segment of the workspace template <typename T> void partition_mlp_bp_workspace( int batch_size, int num_layers, const int* output_features, void* work_space, T** dy_gemms, T** dx_gemms, float** db_scratch, int** semaphores) { /* Workspace is partitioned as DY_GEMMs : DX_GEMMs : DB_SCRATCH : SEMAPHORES */ // Start address where dy_gemm tensors are stored *dy_gemms = reinterpret_cast<T*>(work_space); // Start address where dx_gemm tensors are stored *dx_gemms = *dy_gemms + get_all_activations_size(batch_size, num_layers, output_features); // Start address where db intermediate tensors are stored *db_scratch = reinterpret_cast<float*>( *dx_gemms + get_all_activations_size(batch_size, num_layers, output_features)); // Start address of semaphores *semaphores = reinterpret_cast<int*>( *db_scratch + get_reduction_scratch_space(batch_size, num_layers, output_features)); return; } // Does a simple MLP fprop (GEMM+bias+ReLU). // Can handle num_layers number of layers, each with its own shape. Output of layer i is assumed // to be input of layer i+1. output_features, WPtr and BPtr are arrays of length num_layers, and // must be in the same order i.e. WPtr[i] and BPtr[i] are respectively the weight and bias of layer // 'i'. template <typename T> int mlp_fp( T* X, int input_features, int batch_size, T** WPtr, int num_layers, int* output_features, T** BPtr, T* Y, T* reserved_space, int use_bias, int activation) { T *weight, *input, *output, *bias; T *reserved_space_x, *reserved_space_y; reserved_space_x = NULL; reserved_space_y = reserved_space; // Get cublas handle from Pytorch cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); // Get the stream from cublas handle to reuse for biasReLU kernel. cudaStream_t stream; cublasGetStream(handle, &stream); for (int layer = 0; layer < num_layers; layer++) { weight = WPtr[layer]; input = (layer == 0) ? X : reserved_space_x; output = (layer == num_layers - 1) ? Y : reserved_space_y; if (use_bias) { bias = BPtr[layer]; } int ifeat = (layer == 0) ? 
input_features : output_features[layer - 1]; int ofeat = output_features[layer]; float one = 1.f; float zero = 0.f; cublasStatus_t cublas_status; // Call GEMM: fprop is Y = W'X cublas_status = mlp_gemm( handle, CUBLAS_OP_T, CUBLAS_OP_N, ofeat, batch_size, ifeat, &one, weight, ifeat, input, ifeat, &zero, output, ofeat); if (cublas_status != CUBLAS_STATUS_SUCCESS) { printf("GEMM fprop failed with %d\n", cublas_status); return 1; } const uint &input_size = ofeat; int num_blocks = 0; int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; // Call biasReLU if(use_bias == 1) { if (activation == 0) { // no activation cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, biasAdd_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); biasAdd_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, bias, batch_size, input_size); } else if (activation == 1) { // relu cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, biasAddRelu_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); biasAddRelu_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, bias, batch_size, input_size); } else if (activation == 2) { // sigmoid cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, biasAdd_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); biasAdd_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, bias, batch_size, input_size); cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); Sigmoid_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, batch_size, input_size); } } else { // don't need to do anything in case of no activation and no bias if (activation == 1) { // relu cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Relu_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); Relu_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, batch_size, input_size); } else if (activation == 2) { // sigmoid cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_fprop<T>, BIAS_RELU_FW_NTHREADS, 0); Sigmoid_fprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(output, batch_size, input_size); } } // Set current output as next layer input reserved_space_x = reserved_space_y; // Set next layer output reserved_space_y += ofeat * batch_size; } return 0; } // Does a simple MLP bprop (GEMM+bias+ReLU). // Needs reserved space to come back exactly as it was populated in fprop. // Does dgrad and wgrad sequentially. template <typename T> int mlp_bp( T* X, T* Y, int input_features, int batch_size, T** WPtr, int num_layers, int* output_features, T* dY, T* reserved_space, T* work_space, T* dX, T** dwPtr, T** dbPtr, bool requires_grad, int use_bias, int activation) { T* weight; T *dweight, *dx, *dy, *dbias; T *x, *y; // Where the dx of the biasReLU (== dy of gemm) is stored. Can be thrown away // after bp call. T* dy_gemm_base; // Where the dx after GEMM is stored. T* dx_gemm_base; // Where partial reduction results are stored. float* db_scratch; // Semaphores for reduction. int* semaphores; partition_mlp_bp_workspace<T>( batch_size, num_layers, output_features, work_space, &dy_gemm_base, &dx_gemm_base, &db_scratch, &semaphores); size_t semaphore_size = get_semaphores_size(num_layers, output_features) * sizeof(int); // Get cublas handle from Pytorch cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); // Get the stream from cublas handle to reuse for biasReLU kernel. 
cudaStream_t stream; cublasGetStream(handle, &stream); int* y_offsets = (int*)malloc(num_layers * sizeof(int)); get_y_offsets(batch_size, num_layers, output_features, y_offsets); for (int layer = num_layers - 1; layer >= 0; layer--) { weight = WPtr[layer]; dweight = dwPtr[layer]; // x is read from reserved space x = (layer == 0) ? X : reserved_space + y_offsets[layer - 1]; // dx is written in workspace for all but layer==0 dx = (layer == 0) ? dX : dx_gemm_base + y_offsets[layer - 1]; // y is read from reserved space y = (layer == num_layers - 1) ? Y : reserved_space + y_offsets[layer]; // dx from layer+1 dy = (layer == num_layers - 1) ? dY : dx_gemm_base + y_offsets[layer]; // dy_gemm is written to and read immediately T* dy_gemm = dy_gemm_base + y_offsets[layer]; dbias = dbPtr[layer]; int xfeat = (layer == 0) ? input_features : output_features[layer - 1]; int yfeat = output_features[layer]; float one = 1.f; float zero = 0.f; if (use_bias == 1) { if (activation == 0) { // no acitvation // bgrad dim3 block(BIAS_RELU_BW_NTHREADS_X, BIAS_RELU_BW_NTHREADS_Y); int grid_x, grid_y; cudaMemsetAsync(semaphores, 0, semaphore_size, stream); int block_x = BIAS_RELU_BW_NTHREADS_X; int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y; get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y); dim3 grid(grid_x, grid_y); biasAdd_bprop<T, 4><<<grid, block, 0, stream>>>( dy, yfeat, batch_size, db_scratch, semaphores, dbias); // bypass dgrad through reset pointer dy_gemm = dy; } else if (activation == 1) { // relu dim3 block(BIAS_RELU_BW_NTHREADS_X, BIAS_RELU_BW_NTHREADS_Y); int grid_x, grid_y; cudaMemsetAsync(semaphores, 0, semaphore_size, stream); if(yfeat % (ILP * BIAS_RELU_BW_NTHREADS_X) == 0 && is_aligned(y) && is_aligned(dy) && is_aligned(dy_gemm) && is_aligned(dbias)){ int block_x = ILP * BIAS_RELU_BW_NTHREADS_X; int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y; get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y); dim3 grid(grid_x, grid_y); biasAddRelu_bprop_aligned<T, 4><<<grid, block, 0, stream>>>( y, dy, yfeat, batch_size, dy_gemm, db_scratch, semaphores, dbias); } else { int block_x = BIAS_RELU_BW_NTHREADS_X; int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y; get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y); dim3 grid(grid_x, grid_y); biasAddRelu_bprop<T, 4><<<grid, block, 0, stream>>>( y, dy, yfeat, batch_size, dy_gemm, db_scratch, semaphores, dbias); } } else if (activation == 2) { // sigmoid // activation backward int num_blocks = 0; int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_bprop<T>, BIAS_RELU_FW_NTHREADS, 0); Sigmoid_bprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(dy, y, batch_size, yfeat, dy_gemm); // bgrad, from dy_gemm dim3 block(BIAS_RELU_BW_NTHREADS_X, BIAS_RELU_BW_NTHREADS_Y); int grid_x, grid_y; cudaMemsetAsync(semaphores, 0, semaphore_size, stream); int block_x = BIAS_RELU_BW_NTHREADS_X; int block_y = BIAS_RELU_RED_PER_THREAD * BIAS_RELU_BW_NTHREADS_Y; get_biasAddRelu_bprop_grid_size(yfeat, batch_size, block_x, block_y, &grid_x, &grid_y); dim3 grid(grid_x, grid_y); biasAdd_bprop<T, 4><<<grid, block, 0, stream>>>( dy_gemm, yfeat, batch_size, db_scratch, semaphores, dbias); } } else { // no bias below if (activation == 0) { // bypass dgrad through reset pointer dy_gemm = dy; } else if (activation == 1) { // relu int num_blocks = 
0; int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Relu_bprop<T>, BIAS_RELU_FW_NTHREADS, 0); Relu_bprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(dy, y, batch_size, yfeat, dy_gemm); } else if (activation == 2) { // sigmoid int num_blocks = 0; int num_SMs = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; cudaOccupancyMaxActiveBlocksPerMultiprocessor(&num_blocks, Sigmoid_bprop<T>, BIAS_RELU_FW_NTHREADS, 0); Sigmoid_bprop<<<num_SMs*num_blocks, BIAS_RELU_FW_NTHREADS, 0, stream>>>(dy, y, batch_size, yfeat, dy_gemm); } } cublasStatus_t cublas_status; // Call GEMM dgrad if (layer > 0 || requires_grad == 1) { cublas_status = mlp_gemm( handle, CUBLAS_OP_N, CUBLAS_OP_N, xfeat, batch_size, yfeat, &one, weight, xfeat, dy_gemm, yfeat, &zero, dx, xfeat); if (cublas_status != CUBLAS_STATUS_SUCCESS) { printf("GEMM dgrad failed with %d\n", cublas_status); return 1; } } // Call GEMM wgrad cublas_status = mlp_gemm( handle, CUBLAS_OP_N, CUBLAS_OP_T, xfeat, yfeat, batch_size, &one, x, xfeat, dy_gemm, yfeat, &zero, dweight, xfeat); if (cublas_status != CUBLAS_STATUS_SUCCESS) { printf("GEMM wgrad failed with %d\n", cublas_status); return 1; } } return 0; } // Instantiate for floating point types template int mlp_fp<float>( float* X, int input_features, int batch_size, float** WPtr, int num_layers, int* output_features, float** BPtr, float* Y, float* reserved_space, int use_bias, int activation); template int mlp_bp<float>( float* X, float* Y, int input_features, int batch_size, float** WPtr, int num_layers, int* output_features, float* dY, float* reserved_space, float* work_space, float* dX, float** dwPtr, float** dbPtr, bool requires_grad, int use_bias, int activation); template int mlp_fp<at::Half>( at::Half* X, int input_features, int batch_size, at::Half** WPtr, int num_layers, int* output_features, at::Half** BPtr, at::Half* Y, at::Half* reserved_space, int use_bias, int activation); template int mlp_bp<at::Half>( at::Half* X, at::Half* Y, int input_features, int batch_size, at::Half** WPtr, int num_layers, int* output_features, at::Half* dY, at::Half* reserved_space, at::Half* work_space, at::Half* dX, at::Half** dwPtr, at::Half** dbPtr, bool requires_grad, int use_bias, int activation); template int mlp_fp<double>( double* X, int input_features, int batch_size, double** WPtr, int num_layers, int* output_features, double** BPtr, double* Y, double* reserved_space, int use_bias, int activation); template int mlp_bp<double>( double* X, double* Y, int input_features, int batch_size, double** WPtr, int num_layers, int* output_features, double* dY, double* reserved_space, double* work_space, double* dX, double** dwPtr, double** dbPtr, bool requires_grad, int use_bias, int activation); template size_t get_mlp_bp_workspace_in_bytes<float>( int batch_size, int num_layers, const int* output_features); template size_t get_mlp_bp_workspace_in_bytes<at::Half>( int batch_size, int num_layers, const int* output_features); template size_t get_mlp_bp_workspace_in_bytes<double>( int batch_size, int num_layers, const int* output_features);
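A small illustrative sketch for the forward-path helpers above (assumed caller-side code, not from the original source: the function name and layer widths are made up, and the weight/bias/GEMM setup is omitted):

// Hypothetical sketch: compute the fprop activation storage and per-layer offsets.
// Assumes the functions above are visible in this translation unit.
#include <cuda_runtime.h>
#include <vector>

void example_fprop_reserved_space(int batch_size) {
  int output_features[3] = {1024, 512, 10};  // assumed layer widths
  int num_layers = 3;

  // Total elements of all layer outputs kept in reserved space during fprop.
  size_t reserved_elems = get_mlp_reserved_space(batch_size, num_layers, output_features);

  // Starting offset of each layer's Y inside that buffer (layer 0 starts at offset 0).
  std::vector<int> y_offsets(num_layers);
  get_y_offsets(batch_size, num_layers, output_features, y_offsets.data());

  float* reserved_space = NULL;
  cudaMalloc((void**)&reserved_space, reserved_elems * sizeof(float));
  // ... allocate X, Y and fill WPtr / BPtr with device weight / bias pointers, then:
  // mlp_fp<float>(X, input_features, batch_size, WPtr, num_layers, output_features,
  //               BPtr, Y, reserved_space, /*use_bias=*/1, /*activation=*/1);
  cudaFree(reserved_space);
}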
cb36ed2322ec85ec0b01f7494bfd359b2f225938.hip
// !!! This is a file automatically generated by hipify!!! /* Author: Bohao Zhang Oct. 22 2019 arm_planning mex This code aims to replace the constructor of the rotatotope The hyperparameters that are directly hardcoded in this code are: 1. k_dim --> which dimension in R is k-dependent 2. origin shift [x,y,z] --> origin shift of the robot links 3. buffer distance --> the total distance buffered for the obstacles 4. TOO_SMALL_POLYTOPE_JUDGE --> A criterion for the square of the 2-norm of the generator 5. CONSERVATIVE_BUFFER --> a small offset directly applied to the constraint functions 6. t_plan 7. t_move 8. t_total 9. number of links 10. number of time steps 11. the zonotope of links 12. the zonotope of end effectors 13. the zonotope of base 14. rot_axes --> which axis should be rotated around for each joint 15. link / EE reduce order */ #include "rotatotope_NLP.h" #include<iostream> using namespace Ipopt; const bool debugMode = false; /* Instruction: This is the mex function to replace generate_matrices() multiply() in the constructor of rotatotope Requires: 1. trig_FRS{j}(i) . Z --> the Z of zonotopes in trig_FRS, index: i \in 1 : n_links * 2 j \in 1 : n_time_steps trig_FRS(i,j).Z = (i * n_time_steps + j) * 10 : (i * n_time_steps + j + 1) * 10 - 1 we need trig_FRS(:, 1 : n * 2) for n th link 2. number of obstacles 3. zonotopes of obstacles (1 center with 3 generators) 4. k_opt input for debugging 5. q 6. q_dot 7. q_des 8. g_k */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { std::clock_t start_t, end_t; // timing /* P0. process the input */ if (nrhs != 8) { mexErrMsgIdAndTxt("MyProg:ConvertString","*** Incorrect number of inputs!"); } uint32_t n_links = 3; uint32_t n_joints = n_links * 2; uint32_t n_time_steps = 100; double* R = mxGetPr(prhs[0]); uint32_t R_width = (uint32_t)mxGetM(prhs[0]); uint32_t R_length = (uint32_t)mxGetN(prhs[0]); double* dev_R; hipMalloc((void**)&dev_R, R_width * R_length * sizeof(double)); hipMemcpy(dev_R, R, R_width * R_length * sizeof(double), hipMemcpyHostToDevice); uint32_t n_obstacles = (uint32_t)(*mxGetPr(prhs[1])); double* OZ = mxGetPr(prhs[2]); uint32_t OZ_width = (uint32_t)mxGetM(prhs[2]); uint32_t OZ_length = (uint32_t)mxGetN(prhs[2]); double* k_opt = mxGetPr(prhs[3]); double* q = mxGetPr(prhs[4]); double* q_dot = mxGetPr(prhs[5]); double* q_des = mxGetPr(prhs[6]); double* g_k = mxGetPr(prhs[7]); double link_Z[18] = { 0.1778, 0, 0, 0.1778, 0, 0, 0.1651, 0, 0, 0.1651, 0, 0, 0.1651, 0, 0, 0.1651, 0, 0}; uint32_t link_Z_width = 3; uint32_t link_Z_length = 6; double EE_Z[6] = { 0.3556, 0, 0, 0.3302, 0, 0}; uint32_t EE_Z_width = 3; uint32_t EE_Z_length = 2; double base_Z[3] = { 0.1206, 0, 0.0825}; uint32_t base_Z_width = 3; uint32_t base_Z_length = 1; start_t = clock(); /* P1. generate all the rotatotopes */ uint32_t R_unit_length = R_length / (n_joints * n_time_steps); // equivalent to length(obj.R) uint8_t rot_axes[6] = { 3, 2, 1, 2, 1, 2 }, *dev_rot_axes; // rot_axes can be directly defined here. no need for mex for now.
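// Upload the per-joint rotation axes to the device; dev_rot_axes is consumed by the
// rotatotopeArray constructors below.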
hipMalloc((void**)&dev_rot_axes, 6 * sizeof(uint8_t)); hipMemcpy(dev_rot_axes, rot_axes, 6 * sizeof(uint8_t), hipMemcpyHostToDevice); // should promise that link + 3 * point <= 45, so that combination size <= 1024 uint32_t link_reduce_order = 15; uint32_t point_reduce_order = 10; rotatotopeArray links = rotatotopeArray(n_links, n_time_steps, 2, R, dev_R, R_unit_length, dev_rot_axes, link_Z, link_Z_width, link_Z_length, link_reduce_order, g_k); rotatotopeArray EEs = rotatotopeArray(n_links - 1, n_time_steps, 2, R, dev_R, R_unit_length, dev_rot_axes, EE_Z, EE_Z_width, EE_Z_length, point_reduce_order, g_k); rotatotopeArray base = rotatotopeArray(n_links - 2, n_time_steps, 1, R, dev_R, R_unit_length, dev_rot_axes, base_Z, base_Z_width, base_Z_length, point_reduce_order, g_k); links.debugMode = debugMode; /* P2. stack the rotatotopes */ links.stack(EEs, base); /* P3. generate the constraints */ links.generate_constraints(n_obstacles, OZ, OZ_width, OZ_length); uint32_t n_pairs = 1; uint32_t self_pairs[2] = {0, 2}; // the latter one in the pair is always higher links.generate_self_constraints(n_pairs, self_pairs); end_t = clock(); mexPrintf("CUDA: Construct Rotatotopes time: %.6f ms\n", 1000.0 * (end_t - start_t) / (double)(CLOCKS_PER_SEC)); /* P4. solve the NLP */ SmartPtr<rotatotope_NLP> mynlp = new rotatotope_NLP(); mynlp->set_parameters(&links, q, q_dot, q_des, g_k, n_obstacles); // Create a new instance of IpoptApplication // (use a SmartPtr, not raw) // We are using the factory, since this allows us to compile this // example with an Ipopt Windows DLL SmartPtr<IpoptApplication> app = IpoptApplicationFactory(); // Change some options // Note: The following choices are only examples, they might not be // suitable for your optimization problem. app->Options()->SetNumericValue("tol", 1e-6); app->Options()->SetNumericValue("max_cpu_time", 0.5); app->Options()->SetNumericValue("print_level", 0); app->Options()->SetStringValue("mu_strategy", "adaptive"); app->Options()->SetStringValue("output_file", "ipopt.out"); if(debugMode){ app->Options()->SetStringValue("derivative_test", "second-order"); app->Options()->SetNumericValue("derivative_test_perturbation", 0.000001); } // Initialize the IpoptApplication and process the options ApplicationReturnStatus status; status = app->Initialize(); if( status != Solve_Succeeded ) { mexErrMsgIdAndTxt("MyProg:ConvertString", "*** Error during initialization!"); } // Ask Ipopt to solve the problem status = app->OptimizeTNLP(mynlp); nlhs = 1; if( status == Solve_Succeeded ) { plhs[0] = mxCreateNumericMatrix(n_links * 2, 1, mxDOUBLE_CLASS, mxREAL); double *output0 = (double*)mxGetData(plhs[0]); for (uint32_t i = 0; i < n_links * 2; i++) { output0[i] = mynlp->solution[i]; } } else { plhs[0] = mxCreateNumericMatrix(1, 1, mxINT32_CLASS, mxREAL); int *output0 = (int*)mxGetData(plhs[0]); *output0 = -12345; } end_t = clock(); mexPrintf("CUDA: IPOPT solve time: %.6f ms\n", 1000.0 * (end_t - start_t) / (double)(CLOCKS_PER_SEC)); /* P5. 
handle the output, release the memory */ if(debugMode){ uint32_t link_id = 0; uint32_t RZ_length = links.RZ_length[link_id]; if(links.debug_RZ == nullptr){ mexErrMsgIdAndTxt("MyProg:ConvertString","*** debug_RZ is empty!"); } mxArray* output1 = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { mxArray* time_step_k = mxCreateNumericMatrix(links.Z_width, RZ_length, mxDOUBLE_CLASS, mxREAL); double *pt = (double*)mxGetData(time_step_k); for (uint32_t t = 0; t < RZ_length; t++) { for (uint32_t p = 0; p < links.Z_width; p++) { pt[t * links.Z_width + p] = links.debug_RZ[(k * RZ_length + t) * links.Z_width + p]; } } mxSetCell(output1, k, time_step_k); } plhs[1] = output1; mxArray* output2 = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { mxArray* time_step_k = mxCreateLogicalMatrix(1, RZ_length); bool *pt = (bool*)mxGetData(time_step_k); for (uint32_t t = 0; t < RZ_length; t++) { pt[t] = links.debug_c_idx[k * RZ_length + t]; } mxSetCell(output2, k, time_step_k); } plhs[2] = output2; mxArray* output3 = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { mxArray* time_step_k = mxCreateNumericMatrix(4 * (link_id + 1), RZ_length, mxUINT8_CLASS, mxREAL); uint8_t *pt = (uint8_t*)mxGetData(time_step_k); for (uint32_t t = 0; t < RZ_length; t++) { for (uint32_t p = 0; p < 2 * (link_id + 1); p++) { pt[t * 4 * (link_id + 1) + p] = links.debug_k_idx[(p * n_time_steps + k) * RZ_length + t]; } } for (uint32_t t = 0; t < RZ_length; t++) { for (uint32_t p = 2 * (link_id + 1); p < 4 * (link_id + 1); p++) { pt[t * 4 * (link_id + 1) + p] = links.debug_C_idx[((p - 2 * (link_id + 1)) * n_time_steps + k) * RZ_length + t]; } } mxSetCell(output3, k, time_step_k); } plhs[3] = output3; if(links.A_con[0] == nullptr){ mexErrMsgIdAndTxt("MyProg:ConvertString","*** A_con is empty!"); } mxArray* output4 = mxCreateCellMatrix(1, n_obstacles); for (uint32_t i = 0; i < n_obstacles; i++) { mxArray* obstacle_i = mxCreateCellMatrix(1, n_links); for (uint32_t j = 0; j < n_links; j++) { uint32_t RZ_length = links.RZ_length[j]; uint32_t buff_obstacle_length = RZ_length + 3; uint32_t constraint_length = ((buff_obstacle_length - 1) * (buff_obstacle_length - 2)) / 2; mxArray* link_j = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { mxArray* time_step_k = mxCreateNumericMatrix(constraint_length, links.k_con_num[j][k], mxDOUBLE_CLASS, mxREAL); double *pt = (double*)mxGetData(time_step_k); for (uint32_t t = 0; t < links.k_con_num[j][k]; t++) { for (uint32_t p = 0; p < constraint_length; p++) { pt[t * constraint_length + p] = links.A_con[j][((i * n_time_steps + k) * constraint_length + p) * links.max_k_con_num[j] + t]; } } mxSetCell(link_j, k, time_step_k); } mxSetCell(obstacle_i, j, link_j); } mxSetCell(output4, i, obstacle_i); } plhs[4] = output4; mxArray* output5 = mxCreateCellMatrix(1, n_obstacles); for (uint32_t i = 0; i < n_obstacles; i++) { mxArray* obstacle_i = mxCreateCellMatrix(1, n_links); for (uint32_t j = 0; j < n_links; j++) { uint32_t RZ_length = links.RZ_length[j]; uint32_t buff_obstacle_length = RZ_length + 3; uint32_t constraint_length = ((buff_obstacle_length - 1) * (buff_obstacle_length - 2)) / 2; mxArray* link_j = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { mxArray* time_step_k = mxCreateNumericMatrix(constraint_length, 1, mxDOUBLE_CLASS, mxREAL); double *pt = (double*)mxGetData(time_step_k); for (uint32_t p = 0; p < constraint_length; p++) { pt[p] = 
links.d_con[j][(i * n_time_steps + k) * constraint_length + p]; } mxSetCell(link_j, k, time_step_k); } mxSetCell(obstacle_i, j, link_j); } mxSetCell(output5, i, obstacle_i); } plhs[5] = output5; mxArray* output6 = mxCreateCellMatrix(1, n_obstacles); for (uint32_t i = 0; i < n_obstacles; i++) { mxArray* obstacle_i = mxCreateCellMatrix(1, n_links); for (uint32_t j = 0; j < n_links; j++) { uint32_t RZ_length = links.RZ_length[j]; uint32_t buff_obstacle_length = RZ_length + 3; uint32_t constraint_length = ((buff_obstacle_length - 1) * (buff_obstacle_length - 2)) / 2; mxArray* link_j = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { mxArray* time_step_k = mxCreateNumericMatrix(constraint_length, 1, mxDOUBLE_CLASS, mxREAL); double *pt = (double*)mxGetData(time_step_k); for (uint32_t p = 0; p < constraint_length; p++) { pt[p] = links.delta_con[j][(i * n_time_steps + k) * constraint_length + p]; } mxSetCell(link_j, k, time_step_k); } mxSetCell(obstacle_i, j, link_j); } mxSetCell(output6, i, obstacle_i); } plhs[6] = output6; mxArray* output7 = mxCreateCellMatrix(1, n_obstacles); for (uint32_t i = 0; i < n_obstacles; i++) { mxArray* obstacle_i = mxCreateCellMatrix(1, n_links); for (uint32_t j = 0; j < n_links; j++) { uint32_t RZ_length = links.RZ_length[j]; mxArray* link_j = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { mxArray* time_step_k = mxCreateLogicalMatrix(2 * (j + 1), links.k_con_num[j][k]); bool *pt = mxGetLogicals(time_step_k); for (uint32_t t = 0; t < links.k_con_num[j][k]; t++) { for (uint32_t p = 0; p < 2 * (j + 1); p++) { pt[t * 2 * (j + 1) + p] = links.k_con[j][(p * n_time_steps + k) * RZ_length + t]; } } mxSetCell(link_j, k, time_step_k); } mxSetCell(obstacle_i, j, link_j); } mxSetCell(output7, i, obstacle_i); } plhs[7] = output7; links.evaluate_constraints(k_opt); plhs[8] = mxCreateNumericMatrix((n_links * n_obstacles + n_pairs) * n_time_steps, 1, mxDOUBLE_CLASS, mxREAL); double *output8 = (double*)mxGetData(plhs[8]); for (uint32_t i = 0; i < n_obstacles; i++) { for (uint32_t j = 0; j < n_links; j++) { for (uint32_t k = 0; k < n_time_steps; k++) { output8[(i * n_links + j) * n_time_steps + k] = links.con[(j * n_obstacles + i) * n_time_steps + k]; } } } for(uint32_t i = 0; i < n_pairs; i++){ for (uint32_t j = 0; j < n_time_steps; j++) { output8[i * n_time_steps + j + n_obstacles * n_links * n_time_steps] = links.con_self[i * n_time_steps + j]; } } plhs[9] = mxCreateNumericMatrix(n_links * 2, (n_links * n_obstacles + n_pairs) * n_time_steps, mxDOUBLE_CLASS, mxREAL); double *output9 = (double*)mxGetData(plhs[9]); for (uint32_t i = 0; i < n_obstacles; i++) { for (uint32_t j = 0; j < n_links; j++) { for (uint32_t k = 0; k < n_time_steps; k++) { for (uint32_t p = 0; p < n_links * 2; p++) { output9[((i * n_links + j) * n_time_steps + k) * n_links * 2 + p] = links.jaco_con[((j * n_obstacles + i) * n_time_steps + k) * n_links * 2 + p]; } } } } for (uint32_t i = 0; i < n_pairs; i++) { for (uint32_t j = 0; j < n_time_steps; j++) { for (uint32_t p = 0; p < n_links * 2; p++) { output9[(i * n_time_steps + j + n_obstacles * n_links * n_time_steps) * n_links * 2 + p] = links.jaco_con_self[(i * n_time_steps + j) * n_links * 2 + p]; } } } plhs[10] = mxCreateNumericMatrix(n_links * (n_links * 2 - 1), (n_links * n_obstacles + n_pairs) * n_time_steps, mxDOUBLE_CLASS, mxREAL); double *output10 = (double*)mxGetData(plhs[10]); for (uint32_t i = 0; i < n_obstacles; i++) { for (uint32_t j = 0; j < n_links; j++) { for (uint32_t k = 0; k 
< n_time_steps; k++) { for (uint32_t p = 0; p < n_links * (n_links * 2 - 1); p++) { output10[((i * n_links + j) * n_time_steps + k) * n_links * (n_links * 2 - 1) + p] = links.hess_con[((j * n_obstacles + i) * n_time_steps + k) * n_links * (n_links * 2 - 1) + p]; } } } } for (uint32_t i = 0; i < n_pairs; i++) { for (uint32_t j = 0; j < n_time_steps; j++) { for (uint32_t p = 0; p < n_links * (n_links * 2 - 1); p++) { output10[(i * n_time_steps + j + n_obstacles * n_links * n_time_steps) * n_links * (n_links * 2 - 1) + p] = links.hess_con_self[(i * n_time_steps + j) * n_links * (n_links * 2 - 1) + p]; } } } /* if(links.A_con_self[0] == nullptr){ mexErrMsgIdAndTxt("MyProg:ConvertString","*** A_con_self is empty!"); } mxArray* output11 = mxCreateCellMatrix(1, n_pairs); for (uint32_t i = 0; i < n_pairs; i++) { mxArray* pair_i = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { uint32_t gen_zono_length = links.RZ_length[2]; uint32_t constraint_length = ((gen_zono_length - 1) * (gen_zono_length - 2)) / 2; mxArray* time_step_k = mxCreateNumericMatrix(constraint_length, links.k_con_num_self[i][k], mxDOUBLE_CLASS, mxREAL); double *pt = (double*)mxGetData(time_step_k); for (uint32_t t = 0; t < links.k_con_num_self[i][k]; t++) { for (uint32_t p = 0; p < constraint_length; p++) { pt[t * constraint_length + p] = links.A_con_self[i][(k * constraint_length + p) * links.max_k_con_num_self[i] + t]; } } mxSetCell(pair_i, k, time_step_k); } mxSetCell(output11, i, pair_i); } plhs[11] = output11; mxArray* output12 = mxCreateCellMatrix(1, n_pairs); for (uint32_t i = 0; i < n_pairs; i++) { mxArray* pair_i = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { uint32_t gen_zono_length = links.RZ_length[2]; uint32_t constraint_length = ((gen_zono_length - 1) * (gen_zono_length - 2)) / 2; mxArray* time_step_k = mxCreateNumericMatrix(constraint_length, 1, mxDOUBLE_CLASS, mxREAL); double *pt = (double*)mxGetData(time_step_k); for (uint32_t p = 0; p < constraint_length; p++) { pt[p] = links.d_con_self[i][k * constraint_length + p]; } mxSetCell(pair_i, k, time_step_k); } mxSetCell(output12, i, pair_i); } plhs[12] = output12; mxArray* output13 = mxCreateCellMatrix(1, n_pairs); for (uint32_t i = 0; i < n_pairs; i++) { mxArray* pair_i = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { uint32_t gen_zono_length = links.RZ_length[2]; uint32_t constraint_length = ((gen_zono_length - 1) * (gen_zono_length - 2)) / 2; mxArray* time_step_k = mxCreateNumericMatrix(constraint_length, 1, mxDOUBLE_CLASS, mxREAL); double *pt = (double*)mxGetData(time_step_k); for (uint32_t p = 0; p < constraint_length; p++) { pt[p] = links.delta_con_self[i][k * constraint_length + p]; } mxSetCell(pair_i, k, time_step_k); } mxSetCell(output13, i, pair_i); } plhs[13] = output13; mxArray* output14 = mxCreateCellMatrix(1, n_pairs); for (uint32_t i = 0; i < n_pairs; i++) { mxArray* pair_i = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { mxArray* time_step_k = mxCreateLogicalMatrix(2 * (2 + 1), links.k_con_num_self[i][k]); bool *pt = mxGetLogicals(time_step_k); for (uint32_t t = 0; t < links.k_con_num_self[i][k]; t++) { for (uint32_t p = 0; p < 2 * (2 + 1); p++) { pt[t * 2 * (2 + 1) + p] = links.k_con_self[i][(p * n_time_steps + k) * links.RZ_length[0] + t]; } } mxSetCell(pair_i, k, time_step_k); } mxSetCell(output14, i, pair_i); } plhs[14] = output14; */ } hipFree(dev_R); hipFree(dev_rot_axes); }
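// The gateway above uploads R and rot_axes with unchecked hipMalloc/hipMemcpy
// calls. A checked upload helper is sketched below in the CUDA dialect of the
// paired .cu file; the macro and function names are illustrative choices, not
// part of the original gateway.
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

#define CHECK_CUDA(call)                                                   \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                    \
                    cudaGetErrorString(err_), __FILE__, __LINE__);         \
            exit(EXIT_FAILURE);                                            \
        }                                                                  \
    } while (0)

// Mirrors the R upload: allocate rows*cols doubles on the device and copy.
double* upload_matrix(const double* host, size_t rows, size_t cols) {
    double* dev = nullptr;
    CHECK_CUDA(cudaMalloc((void**)&dev, rows * cols * sizeof(double)));
    CHECK_CUDA(cudaMemcpy(dev, host, rows * cols * sizeof(double),
                          cudaMemcpyHostToDevice));
    return dev;
}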
cb36ed2322ec85ec0b01f7494bfd359b2f225938.cu
/* Author: Bohao Zhang Oct. 22 2019 arm_planning mex This code aims to replace the contructor of the rotatotope The hyperparameters that are directly hardcoded in this code are: 1. k_dim --> which dimension in R is k-dependent 2. origin shift [x,y,z] --> origin shift of the robot links 3. buffer distance --> the total distance buffered for the obstacles 4. TOO_SMALL_POLYTOPE_JUDGE --> A criteria for the square of the 2-norm of the generator 5. CONSERVATIVE_BUFFER --> a small offset directly applied to the constraint functions 6. t_plan 7. t_move 8. t_total 9. number of links 10. number of time steps 11. the zonotope of links 12. the zonotope of end effectors 13. the zonotope of base 14. rot_axes --> which axis should be rotated around for each joint 15. link / EE reduce order */ #include "rotatotope_NLP.h" #include<iostream> using namespace Ipopt; const bool debugMode = false; /* Instruction: This is the mex function to replace generate_matrices() multiply() in the constructor of rotatotope Requires: 1. trig_FRS{j}(i) . Z --> the Z of zonotopes in trig_FRS, index: i \in 1 : n_links * 2 j \in 1 : n_time_steps trig_FRS(i,j).Z = (i * n_time_steps + j) * 10 : (i * n_time_steps + j + 1) * 10 - 1 we need trig_FRS(:, 1 : n * 2) for n th link 2. number of obstacles 3. zonotopes of obstacles (1 center with 3 generators) 4. k_opt input for debugging 5. q 6. q_dot 7. q_des 8. g_k */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { std::clock_t start_t, end_t; // timing /* P0. process the input */ if (nrhs != 8) { mexErrMsgIdAndTxt("MyProg:ConvertString","*** Incorrect number of input!"); } uint32_t n_links = 3; uint32_t n_joints = n_links * 2; uint32_t n_time_steps = 100; double* R = mxGetPr(prhs[0]); uint32_t R_width = (uint32_t)mxGetM(prhs[0]); uint32_t R_length = (uint32_t)mxGetN(prhs[0]); double* dev_R; cudaMalloc((void**)&dev_R, R_width * R_length * sizeof(double)); cudaMemcpy(dev_R, R, R_width * R_length * sizeof(double), cudaMemcpyHostToDevice); uint32_t n_obstacles = (uint32_t)(*mxGetPr(prhs[1])); double* OZ = mxGetPr(prhs[2]); uint32_t OZ_width = (uint32_t)mxGetM(prhs[2]); uint32_t OZ_length = (uint32_t)mxGetN(prhs[2]); double* k_opt = mxGetPr(prhs[3]); double* q = mxGetPr(prhs[4]); double* q_dot = mxGetPr(prhs[5]); double* q_des = mxGetPr(prhs[6]); double* g_k = mxGetPr(prhs[7]); double link_Z[18] = { 0.1778, 0, 0, 0.1778, 0, 0, 0.1651, 0, 0, 0.1651, 0, 0, 0.1651, 0, 0, 0.1651, 0, 0}; uint32_t link_Z_width = 3; uint32_t link_Z_length = 6; double EE_Z[6] = { 0.3556, 0, 0, 0.3302, 0, 0}; uint32_t EE_Z_width = 3; uint32_t EE_Z_length = 2; double base_Z[3] = { 0.1206, 0, 0.0825}; uint32_t base_Z_width = 3; uint32_t base_Z_length = 1; start_t = clock(); /* P1. generate all the rotatotopes */ uint32_t R_unit_length = R_length / (n_joints * n_time_steps); // equivalent with length(obj.R) uint8_t rot_axes[6] = { 3, 2, 1, 2, 1, 2 }, *dev_rot_axes; // rot_axes can be directly defined here. no need for mex for now. 
cudaMalloc((void**)&dev_rot_axes, 6 * sizeof(uint8_t)); cudaMemcpy(dev_rot_axes, rot_axes, 6 * sizeof(uint8_t), cudaMemcpyHostToDevice); // should promise that link + 3 * point <= 45, so that combination size <= 1024 uint32_t link_reduce_order = 15; uint32_t point_reduce_order = 10; rotatotopeArray links = rotatotopeArray(n_links, n_time_steps, 2, R, dev_R, R_unit_length, dev_rot_axes, link_Z, link_Z_width, link_Z_length, link_reduce_order, g_k); rotatotopeArray EEs = rotatotopeArray(n_links - 1, n_time_steps, 2, R, dev_R, R_unit_length, dev_rot_axes, EE_Z, EE_Z_width, EE_Z_length, point_reduce_order, g_k); rotatotopeArray base = rotatotopeArray(n_links - 2, n_time_steps, 1, R, dev_R, R_unit_length, dev_rot_axes, base_Z, base_Z_width, base_Z_length, point_reduce_order, g_k); links.debugMode = debugMode; /* P2. stack the rotatotopes */ links.stack(EEs, base); /* P3. generate the constraints */ links.generate_constraints(n_obstacles, OZ, OZ_width, OZ_length); uint32_t n_pairs = 1; uint32_t self_pairs[2] = {0, 2}; // the latter one in the pair is always higher links.generate_self_constraints(n_pairs, self_pairs); end_t = clock(); mexPrintf("CUDA: Construct Rotatotopes time: %.6f ms\n", 1000.0 * (end_t - start_t) / (double)(CLOCKS_PER_SEC)); /* P4. solve the NLP */ SmartPtr<rotatotope_NLP> mynlp = new rotatotope_NLP(); mynlp->set_parameters(&links, q, q_dot, q_des, g_k, n_obstacles); // Create a new instance of IpoptApplication // (use a SmartPtr, not raw) // We are using the factory, since this allows us to compile this // example with an Ipopt Windows DLL SmartPtr<IpoptApplication> app = IpoptApplicationFactory(); // Change some options // Note: The following choices are only examples, they might not be // suitable for your optimization problem. app->Options()->SetNumericValue("tol", 1e-6); app->Options()->SetNumericValue("max_cpu_time", 0.5); app->Options()->SetNumericValue("print_level", 0); app->Options()->SetStringValue("mu_strategy", "adaptive"); app->Options()->SetStringValue("output_file", "ipopt.out"); if(debugMode){ app->Options()->SetStringValue("derivative_test", "second-order"); app->Options()->SetNumericValue("derivative_test_perturbation", 0.000001); } // Initialize the IpoptApplication and process the options ApplicationReturnStatus status; status = app->Initialize(); if( status != Solve_Succeeded ) { mexErrMsgIdAndTxt("MyProg:ConvertString", "*** Error during initialization!"); } // Ask Ipopt to solve the problem status = app->OptimizeTNLP(mynlp); nlhs = 1; if( status == Solve_Succeeded ) { plhs[0] = mxCreateNumericMatrix(n_links * 2, 1, mxDOUBLE_CLASS, mxREAL); double *output0 = (double*)mxGetData(plhs[0]); for (uint32_t i = 0; i < n_links * 2; i++) { output0[i] = mynlp->solution[i]; } } else { plhs[0] = mxCreateNumericMatrix(1, 1, mxINT32_CLASS, mxREAL); int *output0 = (int*)mxGetData(plhs[0]); *output0 = -12345; } end_t = clock(); mexPrintf("CUDA: IPOPT solve time: %.6f ms\n", 1000.0 * (end_t - start_t) / (double)(CLOCKS_PER_SEC)); /* P5. 
handle the output, release the memory */ if(debugMode){ uint32_t link_id = 0; uint32_t RZ_length = links.RZ_length[link_id]; if(links.debug_RZ == nullptr){ mexErrMsgIdAndTxt("MyProg:ConvertString","*** debug_RZ is empty!"); } mxArray* output1 = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { mxArray* time_step_k = mxCreateNumericMatrix(links.Z_width, RZ_length, mxDOUBLE_CLASS, mxREAL); double *pt = (double*)mxGetData(time_step_k); for (uint32_t t = 0; t < RZ_length; t++) { for (uint32_t p = 0; p < links.Z_width; p++) { pt[t * links.Z_width + p] = links.debug_RZ[(k * RZ_length + t) * links.Z_width + p]; } } mxSetCell(output1, k, time_step_k); } plhs[1] = output1; mxArray* output2 = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { mxArray* time_step_k = mxCreateLogicalMatrix(1, RZ_length); bool *pt = (bool*)mxGetData(time_step_k); for (uint32_t t = 0; t < RZ_length; t++) { pt[t] = links.debug_c_idx[k * RZ_length + t]; } mxSetCell(output2, k, time_step_k); } plhs[2] = output2; mxArray* output3 = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { mxArray* time_step_k = mxCreateNumericMatrix(4 * (link_id + 1), RZ_length, mxUINT8_CLASS, mxREAL); uint8_t *pt = (uint8_t*)mxGetData(time_step_k); for (uint32_t t = 0; t < RZ_length; t++) { for (uint32_t p = 0; p < 2 * (link_id + 1); p++) { pt[t * 4 * (link_id + 1) + p] = links.debug_k_idx[(p * n_time_steps + k) * RZ_length + t]; } } for (uint32_t t = 0; t < RZ_length; t++) { for (uint32_t p = 2 * (link_id + 1); p < 4 * (link_id + 1); p++) { pt[t * 4 * (link_id + 1) + p] = links.debug_C_idx[((p - 2 * (link_id + 1)) * n_time_steps + k) * RZ_length + t]; } } mxSetCell(output3, k, time_step_k); } plhs[3] = output3; if(links.A_con[0] == nullptr){ mexErrMsgIdAndTxt("MyProg:ConvertString","*** A_con is empty!"); } mxArray* output4 = mxCreateCellMatrix(1, n_obstacles); for (uint32_t i = 0; i < n_obstacles; i++) { mxArray* obstacle_i = mxCreateCellMatrix(1, n_links); for (uint32_t j = 0; j < n_links; j++) { uint32_t RZ_length = links.RZ_length[j]; uint32_t buff_obstacle_length = RZ_length + 3; uint32_t constraint_length = ((buff_obstacle_length - 1) * (buff_obstacle_length - 2)) / 2; mxArray* link_j = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { mxArray* time_step_k = mxCreateNumericMatrix(constraint_length, links.k_con_num[j][k], mxDOUBLE_CLASS, mxREAL); double *pt = (double*)mxGetData(time_step_k); for (uint32_t t = 0; t < links.k_con_num[j][k]; t++) { for (uint32_t p = 0; p < constraint_length; p++) { pt[t * constraint_length + p] = links.A_con[j][((i * n_time_steps + k) * constraint_length + p) * links.max_k_con_num[j] + t]; } } mxSetCell(link_j, k, time_step_k); } mxSetCell(obstacle_i, j, link_j); } mxSetCell(output4, i, obstacle_i); } plhs[4] = output4; mxArray* output5 = mxCreateCellMatrix(1, n_obstacles); for (uint32_t i = 0; i < n_obstacles; i++) { mxArray* obstacle_i = mxCreateCellMatrix(1, n_links); for (uint32_t j = 0; j < n_links; j++) { uint32_t RZ_length = links.RZ_length[j]; uint32_t buff_obstacle_length = RZ_length + 3; uint32_t constraint_length = ((buff_obstacle_length - 1) * (buff_obstacle_length - 2)) / 2; mxArray* link_j = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { mxArray* time_step_k = mxCreateNumericMatrix(constraint_length, 1, mxDOUBLE_CLASS, mxREAL); double *pt = (double*)mxGetData(time_step_k); for (uint32_t p = 0; p < constraint_length; p++) { pt[p] = 
links.d_con[j][(i * n_time_steps + k) * constraint_length + p]; } mxSetCell(link_j, k, time_step_k); } mxSetCell(obstacle_i, j, link_j); } mxSetCell(output5, i, obstacle_i); } plhs[5] = output5; mxArray* output6 = mxCreateCellMatrix(1, n_obstacles); for (uint32_t i = 0; i < n_obstacles; i++) { mxArray* obstacle_i = mxCreateCellMatrix(1, n_links); for (uint32_t j = 0; j < n_links; j++) { uint32_t RZ_length = links.RZ_length[j]; uint32_t buff_obstacle_length = RZ_length + 3; uint32_t constraint_length = ((buff_obstacle_length - 1) * (buff_obstacle_length - 2)) / 2; mxArray* link_j = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { mxArray* time_step_k = mxCreateNumericMatrix(constraint_length, 1, mxDOUBLE_CLASS, mxREAL); double *pt = (double*)mxGetData(time_step_k); for (uint32_t p = 0; p < constraint_length; p++) { pt[p] = links.delta_con[j][(i * n_time_steps + k) * constraint_length + p]; } mxSetCell(link_j, k, time_step_k); } mxSetCell(obstacle_i, j, link_j); } mxSetCell(output6, i, obstacle_i); } plhs[6] = output6; mxArray* output7 = mxCreateCellMatrix(1, n_obstacles); for (uint32_t i = 0; i < n_obstacles; i++) { mxArray* obstacle_i = mxCreateCellMatrix(1, n_links); for (uint32_t j = 0; j < n_links; j++) { uint32_t RZ_length = links.RZ_length[j]; mxArray* link_j = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { mxArray* time_step_k = mxCreateLogicalMatrix(2 * (j + 1), links.k_con_num[j][k]); bool *pt = mxGetLogicals(time_step_k); for (uint32_t t = 0; t < links.k_con_num[j][k]; t++) { for (uint32_t p = 0; p < 2 * (j + 1); p++) { pt[t * 2 * (j + 1) + p] = links.k_con[j][(p * n_time_steps + k) * RZ_length + t]; } } mxSetCell(link_j, k, time_step_k); } mxSetCell(obstacle_i, j, link_j); } mxSetCell(output7, i, obstacle_i); } plhs[7] = output7; links.evaluate_constraints(k_opt); plhs[8] = mxCreateNumericMatrix((n_links * n_obstacles + n_pairs) * n_time_steps, 1, mxDOUBLE_CLASS, mxREAL); double *output8 = (double*)mxGetData(plhs[8]); for (uint32_t i = 0; i < n_obstacles; i++) { for (uint32_t j = 0; j < n_links; j++) { for (uint32_t k = 0; k < n_time_steps; k++) { output8[(i * n_links + j) * n_time_steps + k] = links.con[(j * n_obstacles + i) * n_time_steps + k]; } } } for(uint32_t i = 0; i < n_pairs; i++){ for (uint32_t j = 0; j < n_time_steps; j++) { output8[i * n_time_steps + j + n_obstacles * n_links * n_time_steps] = links.con_self[i * n_time_steps + j]; } } plhs[9] = mxCreateNumericMatrix(n_links * 2, (n_links * n_obstacles + n_pairs) * n_time_steps, mxDOUBLE_CLASS, mxREAL); double *output9 = (double*)mxGetData(plhs[9]); for (uint32_t i = 0; i < n_obstacles; i++) { for (uint32_t j = 0; j < n_links; j++) { for (uint32_t k = 0; k < n_time_steps; k++) { for (uint32_t p = 0; p < n_links * 2; p++) { output9[((i * n_links + j) * n_time_steps + k) * n_links * 2 + p] = links.jaco_con[((j * n_obstacles + i) * n_time_steps + k) * n_links * 2 + p]; } } } } for (uint32_t i = 0; i < n_pairs; i++) { for (uint32_t j = 0; j < n_time_steps; j++) { for (uint32_t p = 0; p < n_links * 2; p++) { output9[(i * n_time_steps + j + n_obstacles * n_links * n_time_steps) * n_links * 2 + p] = links.jaco_con_self[(i * n_time_steps + j) * n_links * 2 + p]; } } } plhs[10] = mxCreateNumericMatrix(n_links * (n_links * 2 - 1), (n_links * n_obstacles + n_pairs) * n_time_steps, mxDOUBLE_CLASS, mxREAL); double *output10 = (double*)mxGetData(plhs[10]); for (uint32_t i = 0; i < n_obstacles; i++) { for (uint32_t j = 0; j < n_links; j++) { for (uint32_t k = 0; k 
< n_time_steps; k++) { for (uint32_t p = 0; p < n_links * (n_links * 2 - 1); p++) { output10[((i * n_links + j) * n_time_steps + k) * n_links * (n_links * 2 - 1) + p] = links.hess_con[((j * n_obstacles + i) * n_time_steps + k) * n_links * (n_links * 2 - 1) + p]; } } } } for (uint32_t i = 0; i < n_pairs; i++) { for (uint32_t j = 0; j < n_time_steps; j++) { for (uint32_t p = 0; p < n_links * (n_links * 2 - 1); p++) { output10[(i * n_time_steps + j + n_obstacles * n_links * n_time_steps) * n_links * (n_links * 2 - 1) + p] = links.hess_con_self[(i * n_time_steps + j) * n_links * (n_links * 2 - 1) + p]; } } } /* if(links.A_con_self[0] == nullptr){ mexErrMsgIdAndTxt("MyProg:ConvertString","*** A_con_self is empty!"); } mxArray* output11 = mxCreateCellMatrix(1, n_pairs); for (uint32_t i = 0; i < n_pairs; i++) { mxArray* pair_i = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { uint32_t gen_zono_length = links.RZ_length[2]; uint32_t constraint_length = ((gen_zono_length - 1) * (gen_zono_length - 2)) / 2; mxArray* time_step_k = mxCreateNumericMatrix(constraint_length, links.k_con_num_self[i][k], mxDOUBLE_CLASS, mxREAL); double *pt = (double*)mxGetData(time_step_k); for (uint32_t t = 0; t < links.k_con_num_self[i][k]; t++) { for (uint32_t p = 0; p < constraint_length; p++) { pt[t * constraint_length + p] = links.A_con_self[i][(k * constraint_length + p) * links.max_k_con_num_self[i] + t]; } } mxSetCell(pair_i, k, time_step_k); } mxSetCell(output11, i, pair_i); } plhs[11] = output11; mxArray* output12 = mxCreateCellMatrix(1, n_pairs); for (uint32_t i = 0; i < n_pairs; i++) { mxArray* pair_i = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { uint32_t gen_zono_length = links.RZ_length[2]; uint32_t constraint_length = ((gen_zono_length - 1) * (gen_zono_length - 2)) / 2; mxArray* time_step_k = mxCreateNumericMatrix(constraint_length, 1, mxDOUBLE_CLASS, mxREAL); double *pt = (double*)mxGetData(time_step_k); for (uint32_t p = 0; p < constraint_length; p++) { pt[p] = links.d_con_self[i][k * constraint_length + p]; } mxSetCell(pair_i, k, time_step_k); } mxSetCell(output12, i, pair_i); } plhs[12] = output12; mxArray* output13 = mxCreateCellMatrix(1, n_pairs); for (uint32_t i = 0; i < n_pairs; i++) { mxArray* pair_i = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { uint32_t gen_zono_length = links.RZ_length[2]; uint32_t constraint_length = ((gen_zono_length - 1) * (gen_zono_length - 2)) / 2; mxArray* time_step_k = mxCreateNumericMatrix(constraint_length, 1, mxDOUBLE_CLASS, mxREAL); double *pt = (double*)mxGetData(time_step_k); for (uint32_t p = 0; p < constraint_length; p++) { pt[p] = links.delta_con_self[i][k * constraint_length + p]; } mxSetCell(pair_i, k, time_step_k); } mxSetCell(output13, i, pair_i); } plhs[13] = output13; mxArray* output14 = mxCreateCellMatrix(1, n_pairs); for (uint32_t i = 0; i < n_pairs; i++) { mxArray* pair_i = mxCreateCellMatrix(1, n_time_steps); for (uint32_t k = 0; k < n_time_steps; k++) { mxArray* time_step_k = mxCreateLogicalMatrix(2 * (2 + 1), links.k_con_num_self[i][k]); bool *pt = mxGetLogicals(time_step_k); for (uint32_t t = 0; t < links.k_con_num_self[i][k]; t++) { for (uint32_t p = 0; p < 2 * (2 + 1); p++) { pt[t * 2 * (2 + 1) + p] = links.k_con_self[i][(p * n_time_steps + k) * links.RZ_length[0] + t]; } } mxSetCell(pair_i, k, time_step_k); } mxSetCell(output14, i, pair_i); } plhs[14] = output14; */ } cudaFree(dev_R); cudaFree(dev_rot_axes); }
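// Both mexPrintf timings above measure from the same std::clock() start, so
// the "IPOPT solve time" line reports construction plus solve, and clock()
// measures CPU time rather than time spent on the GPU stream. When a GPU
// phase needs its own timing, CUDA events are the usual tool; a minimal
// sketch with illustrative names:
#include <cuda_runtime.h>

// Times whatever GPU work `phase` enqueues on `stream`, in milliseconds.
static float time_gpu_phase(void (*phase)(cudaStream_t), cudaStream_t stream) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, stream);
    phase(stream);                  // enqueue the work to be timed
    cudaEventRecord(stop, stream);
    cudaEventSynchronize(stop);     // wait for the timed region to finish

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}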
c9dadb6135a066734c875817ece961a6c24a835c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/StaticUtils.h> #include <faiss/impl/FaissAssert.h> #include <math_constants.h> // in CUDA SDK, for CUDART_NAN_F #include <faiss/gpu/impl/VectorResidual.cuh> #include <faiss/gpu/utils/ConversionOperators.cuh> #include <faiss/gpu/utils/Tensor.cuh> #include <algorithm> namespace faiss { namespace gpu { template <typename IndexT, typename CentroidT, bool LargeDim> __global__ void calcResidual( Tensor<float, 2, true> vecs, Tensor<CentroidT, 2, true> centroids, Tensor<IndexT, 1, true> vecToCentroid, Tensor<float, 2, true> residuals) { auto vec = vecs[blockIdx.x]; auto residual = residuals[blockIdx.x]; IndexT centroidId = vecToCentroid[blockIdx.x]; // Vector could be invalid (containing NaNs), so -1 was the // classified centroid if (centroidId == -1) { if (LargeDim) { for (int i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) { residual[i] = CUDART_NAN_F; } } else { residual[threadIdx.x] = CUDART_NAN_F; } return; } auto centroid = centroids[centroidId]; if (LargeDim) { for (int i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) { residual[i] = vec[i] - ConvertTo<float>::to(centroid[i]); } } else { residual[threadIdx.x] = vec[threadIdx.x] - ConvertTo<float>::to(centroid[threadIdx.x]); } } template <typename IndexT, typename CentroidT> void calcResidual( Tensor<float, 2, true>& vecs, Tensor<CentroidT, 2, true>& centroids, Tensor<IndexT, 1, true>& vecToCentroid, Tensor<float, 2, true>& residuals, hipStream_t stream) { FAISS_ASSERT(vecs.getSize(1) == centroids.getSize(1)); FAISS_ASSERT(vecs.getSize(1) == residuals.getSize(1)); FAISS_ASSERT(vecs.getSize(0) == vecToCentroid.getSize(0)); FAISS_ASSERT(vecs.getSize(0) == residuals.getSize(0)); dim3 grid(vecs.getSize(0)); int maxThreads = getMaxThreadsCurrentDevice(); bool largeDim = vecs.getSize(1) > maxThreads; dim3 block(::min(vecs.getSize(1), maxThreads)); if (largeDim) { hipLaunchKernelGGL(( calcResidual<IndexT, CentroidT, true>), dim3(grid), dim3(block), 0, stream, vecs, centroids, vecToCentroid, residuals); } else { hipLaunchKernelGGL(( calcResidual<IndexT, CentroidT, false>), dim3(grid), dim3(block), 0, stream, vecs, centroids, vecToCentroid, residuals); } CUDA_TEST_ERROR(); } void runCalcResidual( Tensor<float, 2, true>& vecs, Tensor<float, 2, true>& centroids, Tensor<idx_t, 1, true>& vecToCentroid, Tensor<float, 2, true>& residuals, hipStream_t stream) { calcResidual<idx_t, float>( vecs, centroids, vecToCentroid, residuals, stream); } void runCalcResidual( Tensor<float, 2, true>& vecs, Tensor<half, 2, true>& centroids, Tensor<idx_t, 1, true>& vecToCentroid, Tensor<float, 2, true>& residuals, hipStream_t stream) { calcResidual<idx_t, half>( vecs, centroids, vecToCentroid, residuals, stream); } template <typename IndexT, typename T> __global__ void gatherReconstructByIds( Tensor<IndexT, 1, true> ids, Tensor<T, 2, true> vecs, Tensor<float, 2, true> out) { IndexT id = ids[blockIdx.x]; // FIXME: will update all GPU code shortly to use int64 indexing types, but // this is a minimal change to allow for >= 2^31 elements in a matrix // auto vec = vecs[id]; // auto outVec = out[blockIdx.x]; auto vec = vecs.data() + id * vecs.getSize(1); auto outVec = out.data() + blockIdx.x * out.getSize(1); Convert<T, float> 
conv; for (IndexT i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) { outVec[i] = id == IndexT(-1) ? 0.0f : conv(vec[i]); } } template <typename IndexT, typename T> __global__ void gatherReconstructByRange( IndexT start, IndexT num, Tensor<T, 2, true> vecs, Tensor<float, 2, true> out) { IndexT id = start + blockIdx.x; // FIXME: will update all GPU code shortly to use int64 indexing types, but // this is a minimal change to allow for >= 2^31 elements in a matrix // auto vec = vecs[id]; // auto outVec = out[blockIdx.x]; auto vec = vecs.data() + id * vecs.getSize(1); auto outVec = out.data() + blockIdx.x * out.getSize(1); Convert<T, float> conv; for (IndexT i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) { outVec[i] = id == IndexT(-1) ? 0.0f : conv(vec[i]); } } template <typename IndexT, typename T> void gatherReconstructByIds( Tensor<IndexT, 1, true>& ids, Tensor<T, 2, true>& vecs, Tensor<float, 2, true>& out, hipStream_t stream) { FAISS_ASSERT(ids.getSize(0) == out.getSize(0)); FAISS_ASSERT(vecs.getSize(1) == out.getSize(1)); dim3 grid(ids.getSize(0)); int maxThreads = getMaxThreadsCurrentDevice(); dim3 block(::min(vecs.getSize(1), maxThreads)); hipLaunchKernelGGL(( gatherReconstructByIds<IndexT, T>) , dim3(grid), dim3(block), 0, stream, ids, vecs, out); CUDA_TEST_ERROR(); } template <typename IndexT, typename T> void gatherReconstructByRange( IndexT start, IndexT num, Tensor<T, 2, true>& vecs, Tensor<float, 2, true>& out, hipStream_t stream) { FAISS_ASSERT(num > 0); FAISS_ASSERT(num == out.getSize(0)); FAISS_ASSERT(vecs.getSize(1) == out.getSize(1)); FAISS_ASSERT(start + num <= vecs.getSize(0)); dim3 grid(num); int maxThreads = getMaxThreadsCurrentDevice(); dim3 block(::min(vecs.getSize(1), maxThreads)); hipLaunchKernelGGL(( gatherReconstructByRange<IndexT, T>) , dim3(grid), dim3(block), 0, stream, start, num, vecs, out); CUDA_TEST_ERROR(); } void runReconstruct( Tensor<idx_t, 1, true>& ids, Tensor<float, 2, true>& vecs, Tensor<float, 2, true>& out, hipStream_t stream) { gatherReconstructByIds<idx_t, float>(ids, vecs, out, stream); } void runReconstruct( Tensor<idx_t, 1, true>& ids, Tensor<half, 2, true>& vecs, Tensor<float, 2, true>& out, hipStream_t stream) { gatherReconstructByIds<idx_t, half>(ids, vecs, out, stream); } void runReconstruct( idx_t start, idx_t num, Tensor<float, 2, true>& vecs, Tensor<float, 2, true>& out, hipStream_t stream) { gatherReconstructByRange<idx_t, float>(start, num, vecs, out, stream); } void runReconstruct( idx_t start, idx_t num, Tensor<half, 2, true>& vecs, Tensor<float, 2, true>& out, hipStream_t stream) { gatherReconstructByRange<idx_t, half>(start, num, vecs, out, stream); } } // namespace gpu } // namespace faiss
c9dadb6135a066734c875817ece961a6c24a835c.cu
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/StaticUtils.h> #include <faiss/impl/FaissAssert.h> #include <math_constants.h> // in CUDA SDK, for CUDART_NAN_F #include <faiss/gpu/impl/VectorResidual.cuh> #include <faiss/gpu/utils/ConversionOperators.cuh> #include <faiss/gpu/utils/Tensor.cuh> #include <algorithm> namespace faiss { namespace gpu { template <typename IndexT, typename CentroidT, bool LargeDim> __global__ void calcResidual( Tensor<float, 2, true> vecs, Tensor<CentroidT, 2, true> centroids, Tensor<IndexT, 1, true> vecToCentroid, Tensor<float, 2, true> residuals) { auto vec = vecs[blockIdx.x]; auto residual = residuals[blockIdx.x]; IndexT centroidId = vecToCentroid[blockIdx.x]; // Vector could be invalid (containing NaNs), so -1 was the // classified centroid if (centroidId == -1) { if (LargeDim) { for (int i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) { residual[i] = CUDART_NAN_F; } } else { residual[threadIdx.x] = CUDART_NAN_F; } return; } auto centroid = centroids[centroidId]; if (LargeDim) { for (int i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) { residual[i] = vec[i] - ConvertTo<float>::to(centroid[i]); } } else { residual[threadIdx.x] = vec[threadIdx.x] - ConvertTo<float>::to(centroid[threadIdx.x]); } } template <typename IndexT, typename CentroidT> void calcResidual( Tensor<float, 2, true>& vecs, Tensor<CentroidT, 2, true>& centroids, Tensor<IndexT, 1, true>& vecToCentroid, Tensor<float, 2, true>& residuals, cudaStream_t stream) { FAISS_ASSERT(vecs.getSize(1) == centroids.getSize(1)); FAISS_ASSERT(vecs.getSize(1) == residuals.getSize(1)); FAISS_ASSERT(vecs.getSize(0) == vecToCentroid.getSize(0)); FAISS_ASSERT(vecs.getSize(0) == residuals.getSize(0)); dim3 grid(vecs.getSize(0)); int maxThreads = getMaxThreadsCurrentDevice(); bool largeDim = vecs.getSize(1) > maxThreads; dim3 block(std::min(vecs.getSize(1), maxThreads)); if (largeDim) { calcResidual<IndexT, CentroidT, true><<<grid, block, 0, stream>>>( vecs, centroids, vecToCentroid, residuals); } else { calcResidual<IndexT, CentroidT, false><<<grid, block, 0, stream>>>( vecs, centroids, vecToCentroid, residuals); } CUDA_TEST_ERROR(); } void runCalcResidual( Tensor<float, 2, true>& vecs, Tensor<float, 2, true>& centroids, Tensor<idx_t, 1, true>& vecToCentroid, Tensor<float, 2, true>& residuals, cudaStream_t stream) { calcResidual<idx_t, float>( vecs, centroids, vecToCentroid, residuals, stream); } void runCalcResidual( Tensor<float, 2, true>& vecs, Tensor<half, 2, true>& centroids, Tensor<idx_t, 1, true>& vecToCentroid, Tensor<float, 2, true>& residuals, cudaStream_t stream) { calcResidual<idx_t, half>( vecs, centroids, vecToCentroid, residuals, stream); } template <typename IndexT, typename T> __global__ void gatherReconstructByIds( Tensor<IndexT, 1, true> ids, Tensor<T, 2, true> vecs, Tensor<float, 2, true> out) { IndexT id = ids[blockIdx.x]; // FIXME: will update all GPU code shortly to use int64 indexing types, but // this is a minimal change to allow for >= 2^31 elements in a matrix // auto vec = vecs[id]; // auto outVec = out[blockIdx.x]; auto vec = vecs.data() + id * vecs.getSize(1); auto outVec = out.data() + blockIdx.x * out.getSize(1); Convert<T, float> conv; for (IndexT i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) { outVec[i] = id == IndexT(-1) ? 
0.0f : conv(vec[i]); } } template <typename IndexT, typename T> __global__ void gatherReconstructByRange( IndexT start, IndexT num, Tensor<T, 2, true> vecs, Tensor<float, 2, true> out) { IndexT id = start + blockIdx.x; // FIXME: will update all GPU code shortly to use int64 indexing types, but // this is a minimal change to allow for >= 2^31 elements in a matrix // auto vec = vecs[id]; // auto outVec = out[blockIdx.x]; auto vec = vecs.data() + id * vecs.getSize(1); auto outVec = out.data() + blockIdx.x * out.getSize(1); Convert<T, float> conv; for (IndexT i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) { outVec[i] = id == IndexT(-1) ? 0.0f : conv(vec[i]); } } template <typename IndexT, typename T> void gatherReconstructByIds( Tensor<IndexT, 1, true>& ids, Tensor<T, 2, true>& vecs, Tensor<float, 2, true>& out, cudaStream_t stream) { FAISS_ASSERT(ids.getSize(0) == out.getSize(0)); FAISS_ASSERT(vecs.getSize(1) == out.getSize(1)); dim3 grid(ids.getSize(0)); int maxThreads = getMaxThreadsCurrentDevice(); dim3 block(std::min(vecs.getSize(1), maxThreads)); gatherReconstructByIds<IndexT, T> <<<grid, block, 0, stream>>>(ids, vecs, out); CUDA_TEST_ERROR(); } template <typename IndexT, typename T> void gatherReconstructByRange( IndexT start, IndexT num, Tensor<T, 2, true>& vecs, Tensor<float, 2, true>& out, cudaStream_t stream) { FAISS_ASSERT(num > 0); FAISS_ASSERT(num == out.getSize(0)); FAISS_ASSERT(vecs.getSize(1) == out.getSize(1)); FAISS_ASSERT(start + num <= vecs.getSize(0)); dim3 grid(num); int maxThreads = getMaxThreadsCurrentDevice(); dim3 block(std::min(vecs.getSize(1), maxThreads)); gatherReconstructByRange<IndexT, T> <<<grid, block, 0, stream>>>(start, num, vecs, out); CUDA_TEST_ERROR(); } void runReconstruct( Tensor<idx_t, 1, true>& ids, Tensor<float, 2, true>& vecs, Tensor<float, 2, true>& out, cudaStream_t stream) { gatherReconstructByIds<idx_t, float>(ids, vecs, out, stream); } void runReconstruct( Tensor<idx_t, 1, true>& ids, Tensor<half, 2, true>& vecs, Tensor<float, 2, true>& out, cudaStream_t stream) { gatherReconstructByIds<idx_t, half>(ids, vecs, out, stream); } void runReconstruct( idx_t start, idx_t num, Tensor<float, 2, true>& vecs, Tensor<float, 2, true>& out, cudaStream_t stream) { gatherReconstructByRange<idx_t, float>(start, num, vecs, out, stream); } void runReconstruct( idx_t start, idx_t num, Tensor<half, 2, true>& vecs, Tensor<float, 2, true>& out, cudaStream_t stream) { gatherReconstructByRange<idx_t, half>(start, num, vecs, out, stream); } } // namespace gpu } // namespace faiss
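// A simplified, standalone version of the residual pattern above: one block
// per vector, threads stride over the dimension, and an assignment of -1
// yields NaN residuals. Plain pointers stand in for the faiss Tensor
// wrappers, so every name here is illustrative rather than faiss API.
#include <cuda_runtime.h>
#include <math_constants.h>
#include <cstdint>

__global__ void calcResidualSimple(const float* vecs,       // [numVecs, dim]
                                   const float* centroids,  // [numCentroids, dim]
                                   const int64_t* assign,   // [numVecs], -1 = invalid
                                   float* residuals,        // [numVecs, dim]
                                   int dim) {
    const int64_t v = blockIdx.x;
    const int64_t c = assign[v];
    for (int i = threadIdx.x; i < dim; i += blockDim.x) {
        residuals[v * dim + i] =
            (c == -1) ? CUDART_NAN_F
                      : vecs[v * dim + i] - centroids[c * dim + i];
    }
}

void runCalcResidualSimple(const float* vecs, const float* centroids,
                           const int64_t* assign, float* residuals,
                           int numVecs, int dim, cudaStream_t stream) {
    // One block per vector; cap the block at 1024 threads here for simplicity
    // (the original queries the device limit instead).
    int block = dim < 1024 ? dim : 1024;
    calcResidualSimple<<<numVecs, block, 0, stream>>>(vecs, centroids, assign,
                                                      residuals, dim);
}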
a123934c0669278a60b21955c49e7c9e45090e0c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"

void checkCUDAErrorFn(const char *msg, const char *file, int line) {
    hipError_t err = hipGetLastError();
    if (hipSuccess == err) {
        return;
    }

    fprintf(stderr, "CUDA error");
    if (file) {
        fprintf(stderr, " (%s:%d)", file, line);
    }
    fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
    exit(EXIT_FAILURE);
}

namespace StreamCompaction {
namespace Common {

/**
 * Maps an array to an array of 0s and 1s for stream compaction. Elements
 * which map to 0 will be removed, and elements which map to 1 will be kept.
 */
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
    int k = threadIdx.x + (blockIdx.x * blockDim.x);
    if (k < n) {
        bools[k] = !!idata[k];
    }
}

/**
 * Performs scatter on an array. That is, for each element in idata,
 * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
 */
__global__ void kernScatter(int n, int *odata, const int *idata,
        const int *bools, const int *indices) {
    int k = threadIdx.x + (blockIdx.x * blockDim.x);
    if (k < n) {
        if (bools[k] == 1) {
            odata[indices[k]] = idata[k];
        }
    }
}

}
}
a123934c0669278a60b21955c49e7c9e45090e0c.cu
#include "common.h"

void checkCUDAErrorFn(const char *msg, const char *file, int line) {
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess == err) {
        return;
    }

    fprintf(stderr, "CUDA error");
    if (file) {
        fprintf(stderr, " (%s:%d)", file, line);
    }
    fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}

namespace StreamCompaction {
namespace Common {

/**
 * Maps an array to an array of 0s and 1s for stream compaction. Elements
 * which map to 0 will be removed, and elements which map to 1 will be kept.
 */
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
    int k = threadIdx.x + (blockIdx.x * blockDim.x);
    if (k < n) {
        bools[k] = !!idata[k];
    }
}

/**
 * Performs scatter on an array. That is, for each element in idata,
 * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
 */
__global__ void kernScatter(int n, int *odata, const int *idata,
        const int *bools, const int *indices) {
    int k = threadIdx.x + (blockIdx.x * blockDim.x);
    if (k < n) {
        if (bools[k] == 1) {
            odata[indices[k]] = idata[k];
        }
    }
}

}
}
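// kernMapToBoolean and kernScatter above are the two device-side halves of a
// compaction; the missing middle step is an exclusive scan of the boolean
// flags. Below is a sketch of a host driver wiring the three steps together,
// using Thrust for the scan; the project may supply its own scan, so treat
// this as an illustration rather than the project's actual interface.
#include <cuda_runtime.h>
#include <thrust/execution_policy.h>
#include <thrust/scan.h>

namespace StreamCompaction {
namespace Common {
__global__ void kernMapToBoolean(int n, int *bools, const int *idata);
__global__ void kernScatter(int n, int *odata, const int *idata,
                            const int *bools, const int *indices);
}
}

// Removes zero elements from d_idata (device pointer, length n) into d_odata.
// Returns the number of surviving elements.
int compactNonZero(int n, int *d_odata, const int *d_idata) {
    const int blockSize = 128;
    const int blocks = (n + blockSize - 1) / blockSize;

    int *d_bools = nullptr, *d_indices = nullptr;
    cudaMalloc(&d_bools, n * sizeof(int));
    cudaMalloc(&d_indices, n * sizeof(int));

    // map -> exclusive scan -> scatter
    StreamCompaction::Common::kernMapToBoolean<<<blocks, blockSize>>>(n, d_bools, d_idata);
    thrust::exclusive_scan(thrust::device, d_bools, d_bools + n, d_indices);
    StreamCompaction::Common::kernScatter<<<blocks, blockSize>>>(n, d_odata, d_idata,
                                                                 d_bools, d_indices);

    // Surviving count = last scan value + last flag.
    int lastIndex = 0, lastBool = 0;
    cudaMemcpy(&lastIndex, d_indices + n - 1, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&lastBool, d_bools + n - 1, sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(d_bools);
    cudaFree(d_indices);
    return lastIndex + lastBool;
}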
f628c066d6e751667f7199dd6cf7dbe3e6070027.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/solver.hpp" #include "caffe/layers/loss/w2_gd_loss_layer.hpp" #include "caffe/util/math_functions.hpp" #include <cfloat> namespace caffe { //----------------------------------- static __global__ void compute_sum(int num_spatial,int num, int channels, int spatial_dim, const float* bottom_sec_diff, const float * prob, float* sum) { CUDA_KERNEL_LOOP(i, num_spatial) { int n = i / spatial_dim; int s = i % spatial_dim; float temp = 0; for (int iter = 0; iter<channels-1; iter++) { int index = (n*channels+iter) * spatial_dim + s; temp += bottom_sec_diff[index] * prob[index]; } sum[i] = temp; } } static __global__ void secforward_kernel(const int count, const int num, const int channels, const int spatial_dim, const float* prob, const float* label, const float* bottom_sec_diff, const float* sum_secx_p, float* bottom_diff) { CUDA_KERNEL_LOOP(index, count) { const int n = index / spatial_dim / channels; const int c = index / spatial_dim % channels; const int s = index % spatial_dim; if (c < channels-1) bottom_diff[index] = bottom_sec_diff[index]*prob[index] - sum_secx_p[n*spatial_dim+s] * prob[index]; else bottom_diff[index] = 0; } } //----------------------------------- static __global__ void Dloss_forward_kernel(int count, int num,int channels, int spatial_dim, const float *in, const float *label, float * prob, float *loss_g, float * loss_d, float *loss_c) { CUDA_KERNEL_LOOP(i, count) { int n = i / spatial_dim; int s = i % spatial_dim; loss_g[n*spatial_dim+s] = -in[(n*channels+channels-1)*spatial_dim+s]; loss_d[n*spatial_dim+s] = -in[((n+num)*channels+channels-1)*spatial_dim+s]; #if 1 float max_value = in[(n*channels+0)*spatial_dim+s]; for (int iter=0;iter<channels-1;iter++) max_value = max(max_value,in[(n*channels+iter)*spatial_dim+s]); float sum = 0; int label_index = label[n*spatial_dim+s]; for (int iter=0;iter<channels-1;iter++) sum += exp(in[(n*channels+iter)*spatial_dim+s]-max_value); for (int iter=0;iter<channels-1;iter++) prob[(n*channels+iter)*spatial_dim+s] = exp(in[(n*channels+iter)*spatial_dim+s]-max_value) / sum; loss_c[n*spatial_dim+s] = -log(max(prob[(n*channels+label_index)*spatial_dim+s],float(FLT_MIN))); max_value = in[((n+num)*channels+0)*spatial_dim+s]; for (int iter=0;iter<channels-1;iter++) max_value = max(max_value,in[((n+num)*channels+iter)*spatial_dim+s]); sum = 0; for (int iter=0;iter<channels-1;iter++) sum += exp(in[((n+num)*channels+iter)*spatial_dim+s]-max_value); for (int iter=0;iter<channels-1;iter++) prob[((n+num)*channels+iter)*spatial_dim+s] = exp(in[((n+num)*channels+iter)*spatial_dim+s]-max_value) / sum; loss_c[(n+num)*spatial_dim+s] = -log(max(prob[((n+num)*channels+label_index)*spatial_dim+s],float(FLT_MIN))); #endif } } static __global__ void Gloss_forward_kernel(int count, int num,int channels, int spatial_dim, const float *in, const float *label, float * prob, float *loss_g, float * loss_c) { CUDA_KERNEL_LOOP(i, count) { int n = i / spatial_dim; int s = i % spatial_dim; loss_g[n*spatial_dim+s] = -in[(n*channels+channels-1)*spatial_dim+s]; #if 1 float max_value = in[(n*channels+0)*spatial_dim+s]; for (int iter=0;iter<channels-1;iter++) max_value = max(max_value,in[(n*channels+iter)*spatial_dim+s]); float sum = 0; int label_index = label[n*spatial_dim+s]; for (int iter=0;iter<channels-1;iter++) sum += exp(in[(n*channels+iter)*spatial_dim+s]-max_value); for (int iter=0;iter<channels-1;iter++) prob[(n*channels+iter)*spatial_dim+s] = 
exp(in[(n*channels+iter)*spatial_dim+s]-max_value) / sum; loss_c[n*spatial_dim+s] = -log(max(prob[(n*channels+label_index)*spatial_dim+s],float(FLT_MIN))); #endif } } static __global__ void Dloss_backward_kernel(int count, int num,int channels, int spatial_dim, const float *data_in,const float *label, const float *prob, float *diff_in) { CUDA_KERNEL_LOOP(i, count) { int n = i / spatial_dim; int s = i % spatial_dim; diff_in[(n*channels+channels-1)*spatial_dim+s] = 1; diff_in[((n+num)*channels+channels-1)*spatial_dim+s] = -1; #if 1 for (int iter=0;iter<channels-1;iter++) { diff_in[(n*channels+iter)*spatial_dim+s] = 1 * prob[(n*channels+iter)*spatial_dim+s]; diff_in[((n+num)*channels+iter)*spatial_dim+s] = 1 * prob[((n+num)*channels+iter)*spatial_dim+s]; } int label_index = label[n*spatial_dim+s]; diff_in[(n*channels+label_index)*spatial_dim+s] -= 1; label_index = label[n*spatial_dim+s]; diff_in[((n+num)*channels+label_index)*spatial_dim+s] -= 1; #endif } } static __global__ void Gloss_backward_kernel(int count, int num,int channels, int spatial_dim, const float *data_in,const float *label, const float *prob, float *diff_in) { CUDA_KERNEL_LOOP(i, count) { int n = i / spatial_dim; int s = i % spatial_dim; diff_in[(n*channels+channels-1)*spatial_dim+s] = -1; #if 1 for (int iter=0;iter<channels-1;iter++) { diff_in[(n*channels+iter)*spatial_dim+s] = 0.5 * 0.1 * prob[(n*channels+iter)*spatial_dim+s]; } int label_index = label[n*spatial_dim+s]; diff_in[(n*channels+label_index)*spatial_dim+s] -= 0.5 * 0.1; #endif } } void W2GdLossLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { float loss_g, loss_d, loss_c; if (Caffe::gan_type() == "train_dnet") { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); hipLaunchKernelGGL(( Dloss_forward_kernel), dim3(CAFFE_GET_BLOCKS(num/2*height*width)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num/2*height*width, num/2, channels, height*width, bottom[0]->gpu_data(), bottom[1]->gpu_data(), prob_.mutable_gpu_data(), loss_g_.mutable_gpu_data(),loss_d_.mutable_gpu_data(), loss_c_.mutable_gpu_data()); loss_g = caffe_gpu_sum(loss_g_.count(),loss_g_.gpu_data()); loss_d = caffe_gpu_sum(loss_d_.count(),loss_d_.gpu_data()); loss_c = caffe_gpu_sum(loss_c_.count(),loss_c_.gpu_data()); top[0]->mutable_cpu_data()[0] = loss_d / float(num/2*height*width) - loss_g / float(num/2*height*width) + loss_c / float(num*height*width); } else { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); hipLaunchKernelGGL(( Gloss_forward_kernel), dim3(CAFFE_GET_BLOCKS(num*height*width)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num*height*width, num, channels, height*width, bottom[0]->gpu_data(), bottom[1]->gpu_data(), prob_.mutable_gpu_data(), loss_g_.mutable_gpu_data(), loss_c_.mutable_gpu_data()); loss_g = caffe_gpu_sum(loss_g_.count(),loss_g_.gpu_data()); loss_c = caffe_gpu_sum(loss_c_.count(),loss_c_.gpu_data()); top[0]->mutable_cpu_data()[0] = loss_g / float(num*height*width) + 0.1 * loss_c / float(num*height*width); } } void W2GdLossLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom) { if (Caffe::second_pass() == false) { if (Caffe::gan_type() == "train_dnet") { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); float loss_weights_ = top[0]->cpu_diff()[0] / float(num/2*1*height*width); hipLaunchKernelGGL(( 
Dloss_backward_kernel), dim3(CAFFE_GET_BLOCKS(num/2*height*width)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num/2*height*width, num/2,channels,height*width, bottom[0]->gpu_data(), bottom[1]->gpu_data(), prob_.gpu_data(), bottom[0]->mutable_gpu_diff()); caffe_gpu_scal(bottom[0]->count(),loss_weights_,bottom[0]->mutable_gpu_diff()); } else { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); float loss_weights_ = top[0]->cpu_diff()[0] / float(num*1*height*width); hipLaunchKernelGGL(( Gloss_backward_kernel), dim3(CAFFE_GET_BLOCKS(num*height*width)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num*height*width, num,channels,height*width, bottom[0]->gpu_data(),bottom[1]->gpu_data(), prob_.gpu_data(), bottom[0]->mutable_gpu_diff()); caffe_gpu_scal(bottom[0]->count(),loss_weights_,bottom[0]->mutable_gpu_diff()); } } else { } } void W2GdLossLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { #if 0 int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); hipLaunchKernelGGL(( compute_sum), dim3(CAFFE_GET_BLOCKS(num*height*width)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num*height*width, num, channels, height*width, bottom[0]->gpu_sec_diff(), prob_.gpu_data(), loss_c_.mutable_gpu_data()); hipLaunchKernelGGL(( secforward_kernel), dim3(CAFFE_GET_BLOCKS(bottom[0]->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom[0]->count(), num, channels, height*width, prob_.gpu_data(), bottom[1]->gpu_data(), bottom[0]->gpu_sec_diff(), loss_c_.gpu_data(), bottom[0]->mutable_gpu_diff()); const float loss_weight = top[0]->cpu_diff()[0] / float(num/2*channels*height*width) * 1; caffe_gpu_scal(bottom[0]->count(), loss_weight, bottom[0]->mutable_gpu_diff()); #endif } } // namespace caffe
f628c066d6e751667f7199dd6cf7dbe3e6070027.cu
#include <vector> #include "caffe/solver.hpp" #include "caffe/layers/loss/w2_gd_loss_layer.hpp" #include "caffe/util/math_functions.hpp" #include <cfloat> namespace caffe { //----------------------------------- static __global__ void compute_sum(int num_spatial,int num, int channels, int spatial_dim, const float* bottom_sec_diff, const float * prob, float* sum) { CUDA_KERNEL_LOOP(i, num_spatial) { int n = i / spatial_dim; int s = i % spatial_dim; float temp = 0; for (int iter = 0; iter<channels-1; iter++) { int index = (n*channels+iter) * spatial_dim + s; temp += bottom_sec_diff[index] * prob[index]; } sum[i] = temp; } } static __global__ void secforward_kernel(const int count, const int num, const int channels, const int spatial_dim, const float* prob, const float* label, const float* bottom_sec_diff, const float* sum_secx_p, float* bottom_diff) { CUDA_KERNEL_LOOP(index, count) { const int n = index / spatial_dim / channels; const int c = index / spatial_dim % channels; const int s = index % spatial_dim; if (c < channels-1) bottom_diff[index] = bottom_sec_diff[index]*prob[index] - sum_secx_p[n*spatial_dim+s] * prob[index]; else bottom_diff[index] = 0; } } //----------------------------------- static __global__ void Dloss_forward_kernel(int count, int num,int channels, int spatial_dim, const float *in, const float *label, float * prob, float *loss_g, float * loss_d, float *loss_c) { CUDA_KERNEL_LOOP(i, count) { int n = i / spatial_dim; int s = i % spatial_dim; loss_g[n*spatial_dim+s] = -in[(n*channels+channels-1)*spatial_dim+s]; loss_d[n*spatial_dim+s] = -in[((n+num)*channels+channels-1)*spatial_dim+s]; #if 1 float max_value = in[(n*channels+0)*spatial_dim+s]; for (int iter=0;iter<channels-1;iter++) max_value = max(max_value,in[(n*channels+iter)*spatial_dim+s]); float sum = 0; int label_index = label[n*spatial_dim+s]; for (int iter=0;iter<channels-1;iter++) sum += exp(in[(n*channels+iter)*spatial_dim+s]-max_value); for (int iter=0;iter<channels-1;iter++) prob[(n*channels+iter)*spatial_dim+s] = exp(in[(n*channels+iter)*spatial_dim+s]-max_value) / sum; loss_c[n*spatial_dim+s] = -log(max(prob[(n*channels+label_index)*spatial_dim+s],float(FLT_MIN))); max_value = in[((n+num)*channels+0)*spatial_dim+s]; for (int iter=0;iter<channels-1;iter++) max_value = max(max_value,in[((n+num)*channels+iter)*spatial_dim+s]); sum = 0; for (int iter=0;iter<channels-1;iter++) sum += exp(in[((n+num)*channels+iter)*spatial_dim+s]-max_value); for (int iter=0;iter<channels-1;iter++) prob[((n+num)*channels+iter)*spatial_dim+s] = exp(in[((n+num)*channels+iter)*spatial_dim+s]-max_value) / sum; loss_c[(n+num)*spatial_dim+s] = -log(max(prob[((n+num)*channels+label_index)*spatial_dim+s],float(FLT_MIN))); #endif } } static __global__ void Gloss_forward_kernel(int count, int num,int channels, int spatial_dim, const float *in, const float *label, float * prob, float *loss_g, float * loss_c) { CUDA_KERNEL_LOOP(i, count) { int n = i / spatial_dim; int s = i % spatial_dim; loss_g[n*spatial_dim+s] = -in[(n*channels+channels-1)*spatial_dim+s]; #if 1 float max_value = in[(n*channels+0)*spatial_dim+s]; for (int iter=0;iter<channels-1;iter++) max_value = max(max_value,in[(n*channels+iter)*spatial_dim+s]); float sum = 0; int label_index = label[n*spatial_dim+s]; for (int iter=0;iter<channels-1;iter++) sum += exp(in[(n*channels+iter)*spatial_dim+s]-max_value); for (int iter=0;iter<channels-1;iter++) prob[(n*channels+iter)*spatial_dim+s] = exp(in[(n*channels+iter)*spatial_dim+s]-max_value) / sum; loss_c[n*spatial_dim+s] = 
-log(max(prob[(n*channels+label_index)*spatial_dim+s],float(FLT_MIN))); #endif } } static __global__ void Dloss_backward_kernel(int count, int num,int channels, int spatial_dim, const float *data_in,const float *label, const float *prob, float *diff_in) { CUDA_KERNEL_LOOP(i, count) { int n = i / spatial_dim; int s = i % spatial_dim; diff_in[(n*channels+channels-1)*spatial_dim+s] = 1; diff_in[((n+num)*channels+channels-1)*spatial_dim+s] = -1; #if 1 for (int iter=0;iter<channels-1;iter++) { diff_in[(n*channels+iter)*spatial_dim+s] = 1 * prob[(n*channels+iter)*spatial_dim+s]; diff_in[((n+num)*channels+iter)*spatial_dim+s] = 1 * prob[((n+num)*channels+iter)*spatial_dim+s]; } int label_index = label[n*spatial_dim+s]; diff_in[(n*channels+label_index)*spatial_dim+s] -= 1; label_index = label[n*spatial_dim+s]; diff_in[((n+num)*channels+label_index)*spatial_dim+s] -= 1; #endif } } static __global__ void Gloss_backward_kernel(int count, int num,int channels, int spatial_dim, const float *data_in,const float *label, const float *prob, float *diff_in) { CUDA_KERNEL_LOOP(i, count) { int n = i / spatial_dim; int s = i % spatial_dim; diff_in[(n*channels+channels-1)*spatial_dim+s] = -1; #if 1 for (int iter=0;iter<channels-1;iter++) { diff_in[(n*channels+iter)*spatial_dim+s] = 0.5 * 0.1 * prob[(n*channels+iter)*spatial_dim+s]; } int label_index = label[n*spatial_dim+s]; diff_in[(n*channels+label_index)*spatial_dim+s] -= 0.5 * 0.1; #endif } } void W2GdLossLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { float loss_g, loss_d, loss_c; if (Caffe::gan_type() == "train_dnet") { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); Dloss_forward_kernel<<<CAFFE_GET_BLOCKS(num/2*height*width), CAFFE_CUDA_NUM_THREADS>>> (num/2*height*width, num/2, channels, height*width, bottom[0]->gpu_data(), bottom[1]->gpu_data(), prob_.mutable_gpu_data(), loss_g_.mutable_gpu_data(),loss_d_.mutable_gpu_data(), loss_c_.mutable_gpu_data()); loss_g = caffe_gpu_sum(loss_g_.count(),loss_g_.gpu_data()); loss_d = caffe_gpu_sum(loss_d_.count(),loss_d_.gpu_data()); loss_c = caffe_gpu_sum(loss_c_.count(),loss_c_.gpu_data()); top[0]->mutable_cpu_data()[0] = loss_d / float(num/2*height*width) - loss_g / float(num/2*height*width) + loss_c / float(num*height*width); } else { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); Gloss_forward_kernel<<<CAFFE_GET_BLOCKS(num*height*width), CAFFE_CUDA_NUM_THREADS>>> (num*height*width, num, channels, height*width, bottom[0]->gpu_data(), bottom[1]->gpu_data(), prob_.mutable_gpu_data(), loss_g_.mutable_gpu_data(), loss_c_.mutable_gpu_data()); loss_g = caffe_gpu_sum(loss_g_.count(),loss_g_.gpu_data()); loss_c = caffe_gpu_sum(loss_c_.count(),loss_c_.gpu_data()); top[0]->mutable_cpu_data()[0] = loss_g / float(num*height*width) + 0.1 * loss_c / float(num*height*width); } } void W2GdLossLayer::Backward_gpu(const vector<Blob*>& top, const vector<Blob*>& bottom) { if (Caffe::second_pass() == false) { if (Caffe::gan_type() == "train_dnet") { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); float loss_weights_ = top[0]->cpu_diff()[0] / float(num/2*1*height*width); Dloss_backward_kernel<<<CAFFE_GET_BLOCKS(num/2*height*width), CAFFE_CUDA_NUM_THREADS>>> (num/2*height*width, num/2,channels,height*width, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 
prob_.gpu_data(), bottom[0]->mutable_gpu_diff()); caffe_gpu_scal(bottom[0]->count(),loss_weights_,bottom[0]->mutable_gpu_diff()); } else { int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); float loss_weights_ = top[0]->cpu_diff()[0] / float(num*1*height*width); Gloss_backward_kernel<<<CAFFE_GET_BLOCKS(num*height*width), CAFFE_CUDA_NUM_THREADS>>> (num*height*width, num,channels,height*width, bottom[0]->gpu_data(),bottom[1]->gpu_data(), prob_.gpu_data(), bottom[0]->mutable_gpu_diff()); caffe_gpu_scal(bottom[0]->count(),loss_weights_,bottom[0]->mutable_gpu_diff()); } } else { } } void W2GdLossLayer::SecForward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { #if 0 int num = bottom[0]->num(); int channels = bottom[0]->channels(); int height = bottom[0]->height(); int width = bottom[0]->width(); compute_sum<<<CAFFE_GET_BLOCKS(num*height*width), CAFFE_CUDA_NUM_THREADS>>> (num*height*width, num, channels, height*width, bottom[0]->gpu_sec_diff(), prob_.gpu_data(), loss_c_.mutable_gpu_data()); secforward_kernel<<<CAFFE_GET_BLOCKS(bottom[0]->count()), CAFFE_CUDA_NUM_THREADS>>> (bottom[0]->count(), num, channels, height*width, prob_.gpu_data(), bottom[1]->gpu_data(), bottom[0]->gpu_sec_diff(), loss_c_.gpu_data(), bottom[0]->mutable_gpu_diff()); const float loss_weight = top[0]->cpu_diff()[0] / float(num/2*channels*height*width) * 1; caffe_gpu_scal(bottom[0]->count(), loss_weight, bottom[0]->mutable_gpu_diff()); #endif } } // namespace caffe
9bd0932e9bba06ef29794540b5fa0e84d9d1a80f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <index_helper.cuh> namespace quda { #ifdef GPU_GAUGE_TOOLS template<typename Float, typename Fmunu, typename Gauge> struct FmunuArg { int threads; // number of active threads required int X[4]; // grid dimensions int border[4]; Fmunu f; Gauge gauge; FmunuArg(Fmunu &f, Gauge &gauge, const GaugeField &meta, const GaugeField &meta_ex) : threads(meta.VolumeCB()), f(f), gauge(gauge) { for (int dir=0; dir<4; ++dir) { X[dir] = meta.X()[dir]; border[dir] = (meta_ex.X()[dir] - X[dir])/2; } } }; template <int mu, int nu, typename Float, typename Arg> __device__ __forceinline__ void computeFmunuCore(Arg &arg, int idx, int parity) { typedef Matrix<complex<Float>,3> Link; int x[4]; auto &X = arg.X; getCoords(x, idx, X, parity); for (int dir=0; dir<4; ++dir) { x[dir] += arg.border[dir]; X[dir] += 2*arg.border[dir]; } Link F; { // U(x,mu) U(x+mu,nu) U[dagger](x+nu,mu) U[dagger](x,nu) // load U(x)_(+mu) int dx[4] = {0, 0, 0, 0}; Link U1 = arg.gauge(mu, linkIndexShift(x,dx,X), parity); // load U(x+mu)_(+nu) dx[mu]++; Link U2 = arg.gauge(nu, linkIndexShift(x,dx,X), 1-parity); dx[mu]--; // load U(x+nu)_(+mu) dx[nu]++; Link U3 = arg.gauge(mu, linkIndexShift(x,dx,X), 1-parity); dx[nu]--; // load U(x)_(+nu) Link U4 = arg.gauge(nu, linkIndexShift(x,dx,X), parity); // compute plaquette F = U1 * U2 * conj(U3) * conj(U4); } { // U(x,nu) U[dagger](x+nu-mu,mu) U[dagger](x-mu,nu) U(x-mu, mu) // load U(x)_(+nu) int dx[4] = {0, 0, 0, 0}; Link U1 = arg.gauge(nu, linkIndexShift(x,dx,X), parity); // load U(x+nu)_(-mu) = U(x+nu-mu)_(+mu) dx[nu]++; dx[mu]--; Link U2 = arg.gauge(mu, linkIndexShift(x,dx,X), parity); dx[mu]++; dx[nu]--; // load U(x-mu)_nu dx[mu]--; Link U3 = arg.gauge(nu, linkIndexShift(x,dx,X), 1-parity); dx[mu]++; // load U(x)_(-mu) = U(x-mu)_(+mu) dx[mu]--; Link U4 = arg.gauge(mu, linkIndexShift(x,dx,X),1-parity); dx[mu]++; // sum this contribution to Fmunu F += U1 * conj(U2) * conj(U3) * U4; } { // U[dagger](x-nu,nu) U(x-nu,mu) U(x+mu-nu,nu) U[dagger](x,mu) // load U(x)_(-nu) int dx[4] = {0, 0, 0, 0}; dx[nu]--; Link U1 = arg.gauge(nu, linkIndexShift(x,dx,X), 1-parity); dx[nu]++; // load U(x-nu)_(+mu) dx[nu]--; Link U2 = arg.gauge(mu, linkIndexShift(x,dx,X), 1-parity); dx[nu]++; // load U(x+mu-nu)_(+nu) dx[mu]++; dx[nu]--; Link U3 = arg.gauge(nu, linkIndexShift(x,dx,X), parity); dx[nu]++; dx[mu]--; // load U(x)_(+mu) Link U4 = arg.gauge(mu, linkIndexShift(x,dx,X), parity); // sum this contribution to Fmunu F += conj(U1) * U2 * U3 * conj(U4); } { // U[dagger](x-mu,mu) U[dagger](x-mu-nu,nu) U(x-mu-nu,mu) U(x-nu,nu) // load U(x)_(-mu) int dx[4] = {0, 0, 0, 0}; dx[mu]--; Link U1 = arg.gauge(mu, linkIndexShift(x,dx,X), 1-parity); dx[mu]++; // load U(x-mu)_(-nu) = U(x-mu-nu)_(+nu) dx[mu]--; dx[nu]--; Link U2 = arg.gauge(nu, linkIndexShift(x,dx,X), parity); dx[nu]++; dx[mu]++; // load U(x-nu)_mu dx[mu]--; dx[nu]--; Link U3 = arg.gauge(mu, linkIndexShift(x,dx,X), parity); dx[nu]++; dx[mu]++; // load U(x)_(-nu) = U(x-nu)_(+nu) dx[nu]--; Link U4 = arg.gauge(nu, linkIndexShift(x,dx,X), 1-parity); dx[nu]++; // sum this contribution to Fmunu F += conj(U1) * conj(U2) * U3 * U4; } // 3 matrix additions, 12 matrix-matrix multiplications, 8 matrix conjugations // Each matrix conjugation involves 9 unary minus operations but these ar not included in the operation count // Each matrix addition involves 
18 real additions // Each matrix-matrix multiplication involves 9*3 complex multiplications and 9*2 complex additions // = 9*3*6 + 9*2*2 = 198 floating-point ops // => Total number of floating point ops per site above is // 3*18 + 12*198 = 54 + 2376 = 2430 { F -= conj(F); // 18 real subtractions + one matrix conjugation F *= static_cast<Float>(0.125); // 18 real multiplications // 36 floating point operations here } constexpr int munu_idx = (mu*(mu-1))/2 + nu; // lower-triangular indexing arg.f(munu_idx, idx, parity) = F; } template<typename Float, typename Arg> __global__ void computeFmunuKernel(Arg arg){ int x_cb = threadIdx.x + blockIdx.x*blockDim.x; int parity = threadIdx.y + blockIdx.y*blockDim.y; int mu_nu = threadIdx.z + blockIdx.z*blockDim.z; if (x_cb >= arg.threads) return; if (mu_nu >= 6) return; switch(mu_nu) { // F[1,0], F[2,0], F[2,1], F[3,0], F[3,1], F[3,2] case 0: computeFmunuCore<1,0,Float>(arg, x_cb, parity); break; case 1: computeFmunuCore<2,0,Float>(arg, x_cb, parity); break; case 2: computeFmunuCore<2,1,Float>(arg, x_cb, parity); break; case 3: computeFmunuCore<3,0,Float>(arg, x_cb, parity); break; case 4: computeFmunuCore<3,1,Float>(arg, x_cb, parity); break; case 5: computeFmunuCore<3,2,Float>(arg, x_cb, parity); break; } } template<typename Float, typename Arg> void computeFmunuCPU(Arg &arg) { for (int parity=0; parity<2; parity++) { for (int x_cb=0; x_cb<arg.threads; x_cb++) { for (int mu=0; mu<4; mu++) { for (int nu=0; nu<mu; nu++) { int mu_nu = (mu*(mu-1))/2 + nu; switch(mu_nu) { // F[1,0], F[2,0], F[2,1], F[3,0], F[3,1], F[3,2] case 0: computeFmunuCore<1,0,Float>(arg, x_cb, parity); break; case 1: computeFmunuCore<2,0,Float>(arg, x_cb, parity); break; case 2: computeFmunuCore<2,1,Float>(arg, x_cb, parity); break; case 3: computeFmunuCore<3,0,Float>(arg, x_cb, parity); break; case 4: computeFmunuCore<3,1,Float>(arg, x_cb, parity); break; case 5: computeFmunuCore<3,2,Float>(arg, x_cb, parity); break; } } } } } } template<typename Float, typename Arg> class FmunuCompute : TunableVectorYZ { Arg &arg; const GaugeField &meta; const QudaFieldLocation location; private: unsigned int minThreads() const { return arg.threads; } bool tuneGridDim() const { return false; } public: FmunuCompute(Arg &arg, const GaugeField &meta, QudaFieldLocation location) : TunableVectorYZ(2,6), arg(arg), meta(meta), location(location) { writeAuxString("threads=%d,stride=%d,prec=%lu",arg.threads,meta.Stride(),sizeof(Float)); } virtual ~FmunuCompute() {} void apply(const hipStream_t &stream){ if (location == QUDA_CUDA_FIELD_LOCATION) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); hipLaunchKernelGGL(( computeFmunuKernel<Float>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, 0, arg); } else { computeFmunuCPU<Float>(arg); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } long long flops() const { return (2430 + 36)*6*2*(long long)arg.threads; } long long bytes() const { return ((16*arg.gauge.Bytes() + arg.f.Bytes())*6*2*arg.threads); } // Ignores link reconstruction }; // FmunuCompute template<typename Float, typename Fmunu, typename Gauge> void computeFmunu(Fmunu f_munu, Gauge gauge, const GaugeField &meta, const GaugeField &meta_ex, QudaFieldLocation location) { FmunuArg<Float,Fmunu,Gauge> arg(f_munu, gauge, meta, meta_ex); FmunuCompute<Float,FmunuArg<Float,Fmunu,Gauge> > fmunuCompute(arg, meta, location); fmunuCompute.apply(0); qudaDeviceSynchronize(); checkCudaError(); } template<typename Float> void computeFmunu(GaugeField &Fmunu, 
const GaugeField &gauge, QudaFieldLocation location) { if (Fmunu.Order() == QUDA_FLOAT2_GAUGE_ORDER) { if (gauge.isNative()) { typedef gauge::FloatNOrder<Float, 18, 2, 18> F; if (gauge.Reconstruct() == QUDA_RECONSTRUCT_NO) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G; computeFmunu<Float>(F(Fmunu), G(gauge), Fmunu, gauge, location); } else if(gauge.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G; computeFmunu<Float>(F(Fmunu), G(gauge), Fmunu, gauge, location); } else if(gauge.Reconstruct() == QUDA_RECONSTRUCT_8) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type G; computeFmunu<Float>(F(Fmunu), G(gauge), Fmunu, gauge, location); } else { errorQuda("Reconstruction type %d not supported", gauge.Reconstruct()); } } else { errorQuda("Gauge field order %d not supported", gauge.Order()); } } else { errorQuda("Fmunu field order %d not supported", Fmunu.Order()); } } #endif // GPU_GAUGE_TOOLS void computeFmunu(GaugeField &Fmunu, const GaugeField& gauge, QudaFieldLocation location){ #ifdef GPU_GAUGE_TOOLS if (Fmunu.Precision() != gauge.Precision()) { errorQuda("Fmunu precision %d must match gauge precision %d", Fmunu.Precision(), gauge.Precision()); } if (gauge.Precision() == QUDA_DOUBLE_PRECISION){ computeFmunu<double>(Fmunu, gauge, location); } else if(gauge.Precision() == QUDA_SINGLE_PRECISION) { computeFmunu<float>(Fmunu, gauge, location); } else { errorQuda("Precision %d not supported", gauge.Precision()); } return; #else errorQuda("Fmunu has not been built"); #endif // GPU_GAUGE_TOOLS } } // namespace quda
9bd0932e9bba06ef29794540b5fa0e84d9d1a80f.cu
#include <quda_internal.h> #include <quda_matrix.h> #include <tune_quda.h> #include <gauge_field.h> #include <gauge_field_order.h> #include <index_helper.cuh> namespace quda { #ifdef GPU_GAUGE_TOOLS template<typename Float, typename Fmunu, typename Gauge> struct FmunuArg { int threads; // number of active threads required int X[4]; // grid dimensions int border[4]; Fmunu f; Gauge gauge; FmunuArg(Fmunu &f, Gauge &gauge, const GaugeField &meta, const GaugeField &meta_ex) : threads(meta.VolumeCB()), f(f), gauge(gauge) { for (int dir=0; dir<4; ++dir) { X[dir] = meta.X()[dir]; border[dir] = (meta_ex.X()[dir] - X[dir])/2; } } }; template <int mu, int nu, typename Float, typename Arg> __device__ __forceinline__ void computeFmunuCore(Arg &arg, int idx, int parity) { typedef Matrix<complex<Float>,3> Link; int x[4]; auto &X = arg.X; getCoords(x, idx, X, parity); for (int dir=0; dir<4; ++dir) { x[dir] += arg.border[dir]; X[dir] += 2*arg.border[dir]; } Link F; { // U(x,mu) U(x+mu,nu) U[dagger](x+nu,mu) U[dagger](x,nu) // load U(x)_(+mu) int dx[4] = {0, 0, 0, 0}; Link U1 = arg.gauge(mu, linkIndexShift(x,dx,X), parity); // load U(x+mu)_(+nu) dx[mu]++; Link U2 = arg.gauge(nu, linkIndexShift(x,dx,X), 1-parity); dx[mu]--; // load U(x+nu)_(+mu) dx[nu]++; Link U3 = arg.gauge(mu, linkIndexShift(x,dx,X), 1-parity); dx[nu]--; // load U(x)_(+nu) Link U4 = arg.gauge(nu, linkIndexShift(x,dx,X), parity); // compute plaquette F = U1 * U2 * conj(U3) * conj(U4); } { // U(x,nu) U[dagger](x+nu-mu,mu) U[dagger](x-mu,nu) U(x-mu, mu) // load U(x)_(+nu) int dx[4] = {0, 0, 0, 0}; Link U1 = arg.gauge(nu, linkIndexShift(x,dx,X), parity); // load U(x+nu)_(-mu) = U(x+nu-mu)_(+mu) dx[nu]++; dx[mu]--; Link U2 = arg.gauge(mu, linkIndexShift(x,dx,X), parity); dx[mu]++; dx[nu]--; // load U(x-mu)_nu dx[mu]--; Link U3 = arg.gauge(nu, linkIndexShift(x,dx,X), 1-parity); dx[mu]++; // load U(x)_(-mu) = U(x-mu)_(+mu) dx[mu]--; Link U4 = arg.gauge(mu, linkIndexShift(x,dx,X),1-parity); dx[mu]++; // sum this contribution to Fmunu F += U1 * conj(U2) * conj(U3) * U4; } { // U[dagger](x-nu,nu) U(x-nu,mu) U(x+mu-nu,nu) U[dagger](x,mu) // load U(x)_(-nu) int dx[4] = {0, 0, 0, 0}; dx[nu]--; Link U1 = arg.gauge(nu, linkIndexShift(x,dx,X), 1-parity); dx[nu]++; // load U(x-nu)_(+mu) dx[nu]--; Link U2 = arg.gauge(mu, linkIndexShift(x,dx,X), 1-parity); dx[nu]++; // load U(x+mu-nu)_(+nu) dx[mu]++; dx[nu]--; Link U3 = arg.gauge(nu, linkIndexShift(x,dx,X), parity); dx[nu]++; dx[mu]--; // load U(x)_(+mu) Link U4 = arg.gauge(mu, linkIndexShift(x,dx,X), parity); // sum this contribution to Fmunu F += conj(U1) * U2 * U3 * conj(U4); } { // U[dagger](x-mu,mu) U[dagger](x-mu-nu,nu) U(x-mu-nu,mu) U(x-nu,nu) // load U(x)_(-mu) int dx[4] = {0, 0, 0, 0}; dx[mu]--; Link U1 = arg.gauge(mu, linkIndexShift(x,dx,X), 1-parity); dx[mu]++; // load U(x-mu)_(-nu) = U(x-mu-nu)_(+nu) dx[mu]--; dx[nu]--; Link U2 = arg.gauge(nu, linkIndexShift(x,dx,X), parity); dx[nu]++; dx[mu]++; // load U(x-nu)_mu dx[mu]--; dx[nu]--; Link U3 = arg.gauge(mu, linkIndexShift(x,dx,X), parity); dx[nu]++; dx[mu]++; // load U(x)_(-nu) = U(x-nu)_(+nu) dx[nu]--; Link U4 = arg.gauge(nu, linkIndexShift(x,dx,X), 1-parity); dx[nu]++; // sum this contribution to Fmunu F += conj(U1) * conj(U2) * U3 * U4; } // 3 matrix additions, 12 matrix-matrix multiplications, 8 matrix conjugations // Each matrix conjugation involves 9 unary minus operations but these ar not included in the operation count // Each matrix addition involves 18 real additions // Each matrix-matrix multiplication involves 9*3 complex 
multiplications and 9*2 complex additions // = 9*3*6 + 9*2*2 = 198 floating-point ops // => Total number of floating point ops per site above is // 3*18 + 12*198 = 54 + 2376 = 2430 { F -= conj(F); // 18 real subtractions + one matrix conjugation F *= static_cast<Float>(0.125); // 18 real multiplications // 36 floating point operations here } constexpr int munu_idx = (mu*(mu-1))/2 + nu; // lower-triangular indexing arg.f(munu_idx, idx, parity) = F; } template<typename Float, typename Arg> __global__ void computeFmunuKernel(Arg arg){ int x_cb = threadIdx.x + blockIdx.x*blockDim.x; int parity = threadIdx.y + blockIdx.y*blockDim.y; int mu_nu = threadIdx.z + blockIdx.z*blockDim.z; if (x_cb >= arg.threads) return; if (mu_nu >= 6) return; switch(mu_nu) { // F[1,0], F[2,0], F[2,1], F[3,0], F[3,1], F[3,2] case 0: computeFmunuCore<1,0,Float>(arg, x_cb, parity); break; case 1: computeFmunuCore<2,0,Float>(arg, x_cb, parity); break; case 2: computeFmunuCore<2,1,Float>(arg, x_cb, parity); break; case 3: computeFmunuCore<3,0,Float>(arg, x_cb, parity); break; case 4: computeFmunuCore<3,1,Float>(arg, x_cb, parity); break; case 5: computeFmunuCore<3,2,Float>(arg, x_cb, parity); break; } } template<typename Float, typename Arg> void computeFmunuCPU(Arg &arg) { for (int parity=0; parity<2; parity++) { for (int x_cb=0; x_cb<arg.threads; x_cb++) { for (int mu=0; mu<4; mu++) { for (int nu=0; nu<mu; nu++) { int mu_nu = (mu*(mu-1))/2 + nu; switch(mu_nu) { // F[1,0], F[2,0], F[2,1], F[3,0], F[3,1], F[3,2] case 0: computeFmunuCore<1,0,Float>(arg, x_cb, parity); break; case 1: computeFmunuCore<2,0,Float>(arg, x_cb, parity); break; case 2: computeFmunuCore<2,1,Float>(arg, x_cb, parity); break; case 3: computeFmunuCore<3,0,Float>(arg, x_cb, parity); break; case 4: computeFmunuCore<3,1,Float>(arg, x_cb, parity); break; case 5: computeFmunuCore<3,2,Float>(arg, x_cb, parity); break; } } } } } } template<typename Float, typename Arg> class FmunuCompute : TunableVectorYZ { Arg &arg; const GaugeField &meta; const QudaFieldLocation location; private: unsigned int minThreads() const { return arg.threads; } bool tuneGridDim() const { return false; } public: FmunuCompute(Arg &arg, const GaugeField &meta, QudaFieldLocation location) : TunableVectorYZ(2,6), arg(arg), meta(meta), location(location) { writeAuxString("threads=%d,stride=%d,prec=%lu",arg.threads,meta.Stride(),sizeof(Float)); } virtual ~FmunuCompute() {} void apply(const cudaStream_t &stream){ if (location == QUDA_CUDA_FIELD_LOCATION) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); computeFmunuKernel<Float><<<tp.grid,tp.block,tp.shared_bytes>>>(arg); } else { computeFmunuCPU<Float>(arg); } } TuneKey tuneKey() const { return TuneKey(meta.VolString(), typeid(*this).name(), aux); } long long flops() const { return (2430 + 36)*6*2*(long long)arg.threads; } long long bytes() const { return ((16*arg.gauge.Bytes() + arg.f.Bytes())*6*2*arg.threads); } // Ignores link reconstruction }; // FmunuCompute template<typename Float, typename Fmunu, typename Gauge> void computeFmunu(Fmunu f_munu, Gauge gauge, const GaugeField &meta, const GaugeField &meta_ex, QudaFieldLocation location) { FmunuArg<Float,Fmunu,Gauge> arg(f_munu, gauge, meta, meta_ex); FmunuCompute<Float,FmunuArg<Float,Fmunu,Gauge> > fmunuCompute(arg, meta, location); fmunuCompute.apply(0); qudaDeviceSynchronize(); checkCudaError(); } template<typename Float> void computeFmunu(GaugeField &Fmunu, const GaugeField &gauge, QudaFieldLocation location) { if (Fmunu.Order() == QUDA_FLOAT2_GAUGE_ORDER) { if 
(gauge.isNative()) { typedef gauge::FloatNOrder<Float, 18, 2, 18> F; if (gauge.Reconstruct() == QUDA_RECONSTRUCT_NO) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type G; computeFmunu<Float>(F(Fmunu), G(gauge), Fmunu, gauge, location); } else if(gauge.Reconstruct() == QUDA_RECONSTRUCT_12) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type G; computeFmunu<Float>(F(Fmunu), G(gauge), Fmunu, gauge, location); } else if(gauge.Reconstruct() == QUDA_RECONSTRUCT_8) { typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type G; computeFmunu<Float>(F(Fmunu), G(gauge), Fmunu, gauge, location); } else { errorQuda("Reconstruction type %d not supported", gauge.Reconstruct()); } } else { errorQuda("Gauge field order %d not supported", gauge.Order()); } } else { errorQuda("Fmunu field order %d not supported", Fmunu.Order()); } } #endif // GPU_GAUGE_TOOLS void computeFmunu(GaugeField &Fmunu, const GaugeField& gauge, QudaFieldLocation location){ #ifdef GPU_GAUGE_TOOLS if (Fmunu.Precision() != gauge.Precision()) { errorQuda("Fmunu precision %d must match gauge precision %d", Fmunu.Precision(), gauge.Precision()); } if (gauge.Precision() == QUDA_DOUBLE_PRECISION){ computeFmunu<double>(Fmunu, gauge, location); } else if(gauge.Precision() == QUDA_SINGLE_PRECISION) { computeFmunu<float>(Fmunu, gauge, location); } else { errorQuda("Precision %d not supported", gauge.Precision()); } return; #else errorQuda("Fmunu has not been built"); #endif // GPU_GAUGE_TOOLS } } // namespace quda
67131c3591ad21c9628f39f0994f8d0611bea2a0.hip
// !!! This is a file automatically generated by hipify!!! /* * This program is a CUDA C program simulating the N-body system * of two galaxies as PHY 241 FINAL PROJECTS * */ /* * TODO:(*for final project) * 1. andromeda * 2. report * 3. presentation * *4. N-body galaxy code-generat 10^11 particles * *5. MatLab write a function to track the distance between Milkway and Andromeda * *6. change accel function to the N-body one. * *7. print mass[i]. because the halo is dark matter. Or better way distinguish dark matter and rings? */ #include <hip/hip_runtime.h> #include <math.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #define PI 3.14159265 #define BUFFERSIZE 500 #define BLOCKSIZE 256 #define G 1.0 #define MASS_1 38.2352941 #define RMIN (7.733/4.5) #define SOFTPARAMETER 0.000001 #define AndromedaXOffsetP -41.0882 #define AndromedaYOffsetP 68.3823 #define AndromedaZOffsetP -33.8634 #define AndromedaXOffsetV 0.0420 #define AndromedaYOffsetV -0.2504 #define AndromedaZOffsetV 0.1240 #define MilkwayXOffsetP 41.0882 #define MilkwayYOffsetP -68.3823 #define MilkwayZOffsetP 33.8634 #define MilkwayXOffsetV -0.0420 #define MilkwayYOffsetV 0.2504 #define MilkwayZOffsetV -0.1240 // Headers void rotate(double* x, double* y, double *z, double n1, double n2, double n3, double theta); __global__ void leapstep(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double dt); __global__ void accel(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double* mass, double dt); __global__ void printstate(double *x, double *y, double *z, unsigned long tnow); void initialCondition_host_file(char *input1, char *input2, double **x, double **y, double **z, double **vx, double **vy, double **vz, double **mass, unsigned long *size); void read_size_from_file(char *input, unsigned long *size) ; /** Main function **/ int main(int argc, char *argv[]) { /* * Handling commandline inputs and setting initial value of the arguments * 1. number of steps (mstep) * 2. warp (nout) * 3. offset (start printing position) * 4. timestamp (dt) * */ unsigned long mstep, nout, offset, tnow = 0, n; double dt, *x, *y, *z, *vx, *vy, *vz, *mass; mstep = (argc > 1) ? atoi(argv[1]) : 100; nout = (argc > 2) ? atoi(argv[2]) : 1; offset = (argc > 3) ? atoi(argv[3]) : 0; dt = (argc > 4) ? 
atof(argv[4]) : (2.0 * PI * RMIN * RMIN) / (sqrt(G * MASS_1) * 40.0); initialCondition_host_file("milky_way.dat", "andromeda.dat", &x, &y, &z, &vx, &vy, &vz, &mass, &n); unsigned long grids = ceil((double)n / BLOCKSIZE), threads = BLOCKSIZE; /* * Use hipDeviceSetLimit() to change the buffer size of printf * used in kernel functions to solve the problem encountered before: * cannot print more than 4096 lines of data using printf * */ hipDeviceSetLimit(hipLimitPrintfFifoSize, n * BUFFERSIZE); /* Start looping steps from first step to mstep */ for (unsigned long i = 0; i < offset; i++, tnow++){ hipLaunchKernelGGL(( accel), dim3(grids), dim3(BLOCKSIZE), 0, 0, n, x, y, z, vx, vy, vz, mass, dt); hipDeviceSynchronize(); hipLaunchKernelGGL(( leapstep), dim3(grids), dim3(BLOCKSIZE), 0, 0, n, x, y, z, vx, vy, vz, dt); hipDeviceSynchronize(); hipLaunchKernelGGL(( accel), dim3(grids), dim3(BLOCKSIZE), 0, 0, n, x, y, z, vx, vy, vz, mass, dt); hipDeviceSynchronize(); } for (unsigned long i = offset; i < mstep; i++, tnow++) { if(i % nout == 0) { hipLaunchKernelGGL(( printstate), dim3(grids), dim3(threads), 0, 0, x, y, z, tnow); hipDeviceSynchronize(); } hipLaunchKernelGGL(( accel), dim3(grids), dim3(BLOCKSIZE), 0, 0, n, x, y, z, vx, vy, vz, mass, dt); hipDeviceSynchronize(); hipLaunchKernelGGL(( leapstep), dim3(grids), dim3(BLOCKSIZE), 0, 0, n, x, y, z, vx, vy, vz, dt); hipDeviceSynchronize(); hipLaunchKernelGGL(( accel), dim3(grids), dim3(BLOCKSIZE), 0, 0, n, x, y, z, vx, vy, vz, mass, dt); hipDeviceSynchronize(); } if(mstep % nout == 0) { hipLaunchKernelGGL(( printstate), dim3(grids), dim3(BLOCKSIZE), 0, 0, x, y, z, tnow); } hipDeviceSynchronize(); // After finishing, free the allocated memory hipFree(x); // Exit the current thread return 0; } void rotate(double* x, double* y, double *z, double n1, double n2, double n3, double theta) { double sigma = -theta; double c = cos(sigma); double s = sin(sigma); double a = 1 - cos(sigma); double tmpx = ( a * n1 * n1 + c ) * (*x) + ( a * n1 * n2 - s * n3 ) * (*y) + ( a * n1 * n3 + s * n2 ) * (*z); double tmpy = ( a * n1 * n2 + s * n3 ) * (*x) + ( a * n2 * n2 + c ) * (*y) + ( a * n2 * n3 - s * n1 ) * (*z); double tmpz = ( a * n1 * n3 - s * n2 ) * (*x) + ( a * n2 * n3 + s * n1 ) * (*y) + ( a * n3 * n3 + c ) * (*z); (*x) = tmpx; (*y) = tmpy; (*z) = tmpz; } __global__ void leapstep(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double dt) { const unsigned long serial = blockIdx.x * blockDim.x + threadIdx.x; if (serial < n){ x[serial] += dt * vx[serial]; y[serial] += dt * vy[serial]; z[serial] += dt * vz[serial]; } } __global__ void accel(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double* mass, double dt) { const unsigned long serial = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long tdx = threadIdx.x; __shared__ double lx[BLOCKSIZE]; __shared__ double ly[BLOCKSIZE]; __shared__ double lz[BLOCKSIZE]; __shared__ double lm[BLOCKSIZE]; double ax = 0.0, ay = 0.0, az = 0.0; double norm; double thisX, thisY, thisZ; if (serial < n) { thisX = x[serial]; thisY = y[serial]; thisZ = z[serial]; } for (unsigned long i = 0; i < gridDim.x; i++) { unsigned long index = i * blockDim.x + tdx; if (index < n) { // Copy data from main memory lx[tdx] = x[index]; lz[tdx] = y[index]; ly[tdx] = z[index]; lm[tdx] = mass[index]; } __syncthreads(); // Accumulates the acceleration #pragma unroll for (unsigned long j = 0; j < BLOCKSIZE; j++) { unsigned long pos = i * blockDim.x + j; if (pos >= n) { 
continue; } norm = pow(SOFTPARAMETER + pow(thisX - lx[j], 2) + pow(thisY - ly[j], 2) + pow(thisZ - lz[j], 2), 1.5); ax += - G * lm[j] * (thisX - lx[j]) / norm; ay += - G * lm[j] * (thisY - ly[j]) / norm; az += - G * lm[j] * (thisZ - lz[j]) / norm; } __syncthreads(); } if (serial < n) { vx[serial] += 0.5 * dt * ax; vy[serial] += 0.5 * dt * ay; vz[serial] += 0.5 * dt * az; } } __global__ void printstate(double *x, double *y, double *z, unsigned long tnow) { const unsigned long serial = blockIdx.x * blockDim.x + threadIdx.x; if(serial < 10000 || (serial >= 44000 && serial < 54000)){ printf("%d,%12.6lf,%12.6lf,%12.6lf,%d\n", serial, x[serial], y[serial], z[serial], tnow); } } void initialCondition_host_file(char *input1, char *input2, double **x, double **y, double **z, double **vx, double **vy, double **vz, double **mass, unsigned long *size) { unsigned long s1, s2; read_size_from_file(input1, &s1); (*size) = s1; read_size_from_file(input2, &s2); (*size) += s2; unsigned long numOfBlocks = ceil(((double)(*size)) / BLOCKSIZE); // Initial local data array double *lx, *ly, *lz, *lvx, *lvy, *lvz, *lm; lx = (double*) malloc(7 * numOfBlocks * BLOCKSIZE * sizeof(double)); ly = lx + numOfBlocks * BLOCKSIZE; lz = ly + numOfBlocks * BLOCKSIZE; lvx = lz + numOfBlocks * BLOCKSIZE; lvy = lvx + numOfBlocks * BLOCKSIZE; lvz = lvy + numOfBlocks * BLOCKSIZE; lm = lvz + numOfBlocks * BLOCKSIZE; // Read data from file1 FILE *fp = fopen(input1, "r"); if(fp == NULL){ printf("Error: fail to open file 1\n"); exit(-1); } unsigned long count = 0; // Skip first galaxy unsigned long junk1; double junk2; fscanf(fp, "%lu %lf\n", &junk1, &junk2); double omega = 0.0; double sigma = PI / 2.0; while((!feof(fp)) && (count < s1)){ fscanf(fp, "%lf %lf %lf %lf %lf %lf %lf\n", lm + count, lx + count, ly + count, lz + count, lvx + count, lvy + count, lvz + count); rotate(lx + count, ly + count, lz + count, cos(omega), sin(omega), 0, sigma); rotate(lvx + count, lvy + count, lvz + count, cos(omega), sin(omega), 0, sigma); *(lx + count) += MilkwayXOffsetP; *(ly + count) += MilkwayYOffsetP; *(lz + count) += MilkwayZOffsetP; *(lvx + count) += MilkwayXOffsetV; *(lvy + count) += MilkwayYOffsetV; *(lvz + count) += MilkwayZOffsetV; count++; } fclose(fp); // Read data from file2 fp = fopen(input2, "r"); if(fp == NULL){ printf("Error: fail to open file 2\n"); exit(-1); } // Skip first line fscanf(fp, "%lu %lf\n", &junk1, &junk2); omega = - 2.0 * PI / 3.0; sigma = PI / 6.0; while((!feof(fp)) && (count < (*size))){ fscanf(fp, "%lf %lf %lf %lf %lf %lf %lf\n", lm + count, lx + count, ly + count, lz + count, lvx + count, lvy + count, lvz + count); rotate(lx + count, ly + count, lz + count, cos(omega), sin(omega), 0, sigma); rotate(lvx + count, lvy + count, lvz + count, cos(omega), sin(omega), 0, sigma); *(lx + count) += AndromedaXOffsetP; *(ly + count) += AndromedaYOffsetP; *(lz + count) += AndromedaZOffsetP; *(lvx + count) += AndromedaXOffsetV; *(lvy + count) += AndromedaYOffsetV; *(lvz + count) += AndromedaZOffsetV; count++; } fclose(fp); // Allocate device memory hipMalloc(x, 7 * numOfBlocks * BLOCKSIZE * sizeof(double)); (*y) = (*x) + numOfBlocks * BLOCKSIZE; (*z) = (*y) + numOfBlocks * BLOCKSIZE; (*vx) = (*z) + numOfBlocks * BLOCKSIZE; (*vy) = (*vx) + numOfBlocks * BLOCKSIZE; (*vz) = (*vy) + numOfBlocks * BLOCKSIZE; (*mass) = (*vz) + numOfBlocks * BLOCKSIZE; hipMemcpy((*x), lx, 7 * numOfBlocks * BLOCKSIZE * sizeof(double), hipMemcpyHostToDevice); free(lx); } void read_size_from_file(char *input, unsigned long *size) { FILE *fp = fopen(input, 
"r"); fscanf(fp, "%lu", size); fclose(fp); }
67131c3591ad21c9628f39f0994f8d0611bea2a0.cu
/* * This program is a CUDA C program simulating the N-body system * of two galaxies as PHY 241 FINAL PROJECTS * */ /* * TODO:(*for final project) * 1. andromeda * 2. report * 3. presentation * *4. N-body galaxy code-generat 10^11 particles * *5. MatLab write a function to track the distance between Milkway and Andromeda * *6. change accel function to the N-body one. * *7. print mass[i]. because the halo is dark matter. Or better way distinguish dark matter and rings? */ #include <cuda.h> #include <math.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #include <curand.h> #include <curand_kernel.h> #define PI 3.14159265 #define BUFFERSIZE 500 #define BLOCKSIZE 256 #define G 1.0 #define MASS_1 38.2352941 #define RMIN (7.733/4.5) #define SOFTPARAMETER 0.000001 #define AndromedaXOffsetP -41.0882 #define AndromedaYOffsetP 68.3823 #define AndromedaZOffsetP -33.8634 #define AndromedaXOffsetV 0.0420 #define AndromedaYOffsetV -0.2504 #define AndromedaZOffsetV 0.1240 #define MilkwayXOffsetP 41.0882 #define MilkwayYOffsetP -68.3823 #define MilkwayZOffsetP 33.8634 #define MilkwayXOffsetV -0.0420 #define MilkwayYOffsetV 0.2504 #define MilkwayZOffsetV -0.1240 // Headers void rotate(double* x, double* y, double *z, double n1, double n2, double n3, double theta); __global__ void leapstep(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double dt); __global__ void accel(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double* mass, double dt); __global__ void printstate(double *x, double *y, double *z, unsigned long tnow); void initialCondition_host_file(char *input1, char *input2, double **x, double **y, double **z, double **vx, double **vy, double **vz, double **mass, unsigned long *size); void read_size_from_file(char *input, unsigned long *size) ; /** Main function **/ int main(int argc, char *argv[]) { /* * Handling commandline inputs and setting initial value of the arguments * 1. number of steps (mstep) * 2. warp (nout) * 3. offset (start printing position) * 4. timestamp (dt) * */ unsigned long mstep, nout, offset, tnow = 0, n; double dt, *x, *y, *z, *vx, *vy, *vz, *mass; mstep = (argc > 1) ? atoi(argv[1]) : 100; nout = (argc > 2) ? atoi(argv[2]) : 1; offset = (argc > 3) ? atoi(argv[3]) : 0; dt = (argc > 4) ? 
atof(argv[4]) : (2.0 * PI * RMIN * RMIN) / (sqrt(G * MASS_1) * 40.0); initialCondition_host_file("milky_way.dat", "andromeda.dat", &x, &y, &z, &vx, &vy, &vz, &mass, &n); unsigned long grids = ceil((double)n / BLOCKSIZE), threads = BLOCKSIZE; /* * Use cudaDeviceSetLimit() to change the buffer size of printf * used in kernel functions to solve the problem encountered before: * cannot print more than 4096 lines of data using printf * */ cudaDeviceSetLimit(cudaLimitPrintfFifoSize, n * BUFFERSIZE); /* Start looping steps from first step to mstep */ for (unsigned long i = 0; i < offset; i++, tnow++){ accel<<<grids, BLOCKSIZE>>> (n, x, y, z, vx, vy, vz, mass, dt); cudaDeviceSynchronize(); leapstep<<<grids, BLOCKSIZE>>> (n, x, y, z, vx, vy, vz, dt); cudaDeviceSynchronize(); accel<<<grids, BLOCKSIZE>>> (n, x, y, z, vx, vy, vz, mass, dt); cudaDeviceSynchronize(); } for (unsigned long i = offset; i < mstep; i++, tnow++) { if(i % nout == 0) { printstate<<<grids, threads>>> (x, y, z, tnow); cudaDeviceSynchronize(); } accel<<<grids, BLOCKSIZE>>> (n, x, y, z, vx, vy, vz, mass, dt); cudaDeviceSynchronize(); leapstep<<<grids, BLOCKSIZE>>> (n, x, y, z, vx, vy, vz, dt); cudaDeviceSynchronize(); accel<<<grids, BLOCKSIZE>>> (n, x, y, z, vx, vy, vz, mass, dt); cudaDeviceSynchronize(); } if(mstep % nout == 0) { printstate<<<grids, BLOCKSIZE>>>(x, y, z, tnow); } cudaDeviceSynchronize(); // After finishing, free the allocated memory cudaFree(x); // Exit the current thread return 0; } void rotate(double* x, double* y, double *z, double n1, double n2, double n3, double theta) { double sigma = -theta; double c = cos(sigma); double s = sin(sigma); double a = 1 - cos(sigma); double tmpx = ( a * n1 * n1 + c ) * (*x) + ( a * n1 * n2 - s * n3 ) * (*y) + ( a * n1 * n3 + s * n2 ) * (*z); double tmpy = ( a * n1 * n2 + s * n3 ) * (*x) + ( a * n2 * n2 + c ) * (*y) + ( a * n2 * n3 - s * n1 ) * (*z); double tmpz = ( a * n1 * n3 - s * n2 ) * (*x) + ( a * n2 * n3 + s * n1 ) * (*y) + ( a * n3 * n3 + c ) * (*z); (*x) = tmpx; (*y) = tmpy; (*z) = tmpz; } __global__ void leapstep(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double dt) { const unsigned long serial = blockIdx.x * blockDim.x + threadIdx.x; if (serial < n){ x[serial] += dt * vx[serial]; y[serial] += dt * vy[serial]; z[serial] += dt * vz[serial]; } } __global__ void accel(unsigned long n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double* mass, double dt) { const unsigned long serial = blockIdx.x * blockDim.x + threadIdx.x; const unsigned long tdx = threadIdx.x; __shared__ double lx[BLOCKSIZE]; __shared__ double ly[BLOCKSIZE]; __shared__ double lz[BLOCKSIZE]; __shared__ double lm[BLOCKSIZE]; double ax = 0.0, ay = 0.0, az = 0.0; double norm; double thisX, thisY, thisZ; if (serial < n) { thisX = x[serial]; thisY = y[serial]; thisZ = z[serial]; } for (unsigned long i = 0; i < gridDim.x; i++) { unsigned long index = i * blockDim.x + tdx; if (index < n) { // Copy data from main memory lx[tdx] = x[index]; lz[tdx] = y[index]; ly[tdx] = z[index]; lm[tdx] = mass[index]; } __syncthreads(); // Accumulates the acceleration #pragma unroll for (unsigned long j = 0; j < BLOCKSIZE; j++) { unsigned long pos = i * blockDim.x + j; if (pos >= n) { continue; } norm = pow(SOFTPARAMETER + pow(thisX - lx[j], 2) + pow(thisY - ly[j], 2) + pow(thisZ - lz[j], 2), 1.5); ax += - G * lm[j] * (thisX - lx[j]) / norm; ay += - G * lm[j] * (thisY - ly[j]) / norm; az += - G * lm[j] * (thisZ - lz[j]) / norm; } __syncthreads(); } if (serial < n) 
{ vx[serial] += 0.5 * dt * ax; vy[serial] += 0.5 * dt * ay; vz[serial] += 0.5 * dt * az; } } __global__ void printstate(double *x, double *y, double *z, unsigned long tnow) { const unsigned long serial = blockIdx.x * blockDim.x + threadIdx.x; if(serial < 10000 || (serial >= 44000 && serial < 54000)){ printf("%d,%12.6lf,%12.6lf,%12.6lf,%d\n", serial, x[serial], y[serial], z[serial], tnow); } } void initialCondition_host_file(char *input1, char *input2, double **x, double **y, double **z, double **vx, double **vy, double **vz, double **mass, unsigned long *size) { unsigned long s1, s2; read_size_from_file(input1, &s1); (*size) = s1; read_size_from_file(input2, &s2); (*size) += s2; unsigned long numOfBlocks = ceil(((double)(*size)) / BLOCKSIZE); // Initial local data array double *lx, *ly, *lz, *lvx, *lvy, *lvz, *lm; lx = (double*) malloc(7 * numOfBlocks * BLOCKSIZE * sizeof(double)); ly = lx + numOfBlocks * BLOCKSIZE; lz = ly + numOfBlocks * BLOCKSIZE; lvx = lz + numOfBlocks * BLOCKSIZE; lvy = lvx + numOfBlocks * BLOCKSIZE; lvz = lvy + numOfBlocks * BLOCKSIZE; lm = lvz + numOfBlocks * BLOCKSIZE; // Read data from file1 FILE *fp = fopen(input1, "r"); if(fp == NULL){ printf("Error: fail to open file 1\n"); exit(-1); } unsigned long count = 0; // Skip first galaxy unsigned long junk1; double junk2; fscanf(fp, "%lu %lf\n", &junk1, &junk2); double omega = 0.0; double sigma = PI / 2.0; while((!feof(fp)) && (count < s1)){ fscanf(fp, "%lf %lf %lf %lf %lf %lf %lf\n", lm + count, lx + count, ly + count, lz + count, lvx + count, lvy + count, lvz + count); rotate(lx + count, ly + count, lz + count, cos(omega), sin(omega), 0, sigma); rotate(lvx + count, lvy + count, lvz + count, cos(omega), sin(omega), 0, sigma); *(lx + count) += MilkwayXOffsetP; *(ly + count) += MilkwayYOffsetP; *(lz + count) += MilkwayZOffsetP; *(lvx + count) += MilkwayXOffsetV; *(lvy + count) += MilkwayYOffsetV; *(lvz + count) += MilkwayZOffsetV; count++; } fclose(fp); // Read data from file2 fp = fopen(input2, "r"); if(fp == NULL){ printf("Error: fail to open file 2\n"); exit(-1); } // Skip first line fscanf(fp, "%lu %lf\n", &junk1, &junk2); omega = - 2.0 * PI / 3.0; sigma = PI / 6.0; while((!feof(fp)) && (count < (*size))){ fscanf(fp, "%lf %lf %lf %lf %lf %lf %lf\n", lm + count, lx + count, ly + count, lz + count, lvx + count, lvy + count, lvz + count); rotate(lx + count, ly + count, lz + count, cos(omega), sin(omega), 0, sigma); rotate(lvx + count, lvy + count, lvz + count, cos(omega), sin(omega), 0, sigma); *(lx + count) += AndromedaXOffsetP; *(ly + count) += AndromedaYOffsetP; *(lz + count) += AndromedaZOffsetP; *(lvx + count) += AndromedaXOffsetV; *(lvy + count) += AndromedaYOffsetV; *(lvz + count) += AndromedaZOffsetV; count++; } fclose(fp); // Allocate device memory cudaMalloc(x, 7 * numOfBlocks * BLOCKSIZE * sizeof(double)); (*y) = (*x) + numOfBlocks * BLOCKSIZE; (*z) = (*y) + numOfBlocks * BLOCKSIZE; (*vx) = (*z) + numOfBlocks * BLOCKSIZE; (*vy) = (*vx) + numOfBlocks * BLOCKSIZE; (*vz) = (*vy) + numOfBlocks * BLOCKSIZE; (*mass) = (*vz) + numOfBlocks * BLOCKSIZE; cudaMemcpy((*x), lx, 7 * numOfBlocks * BLOCKSIZE * sizeof(double), cudaMemcpyHostToDevice); free(lx); } void read_size_from_file(char *input, unsigned long *size) { FILE *fp = fopen(input, "r"); fscanf(fp, "%lu", size); fclose(fp); }
ed4bc6824d007aad5e2519f426fd5e8d688a0b5e.hip
// !!! This is a file automatically generated by hipify!!!
#include "bwt.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>

int comp_size = 1;

// Lexicographic comparator for qsort: compares two rotations over comp_size bytes.
int lex_compare(const void * a, const void * b)
{
    unsigned char *x1 = *(unsigned char**)a;
    unsigned char *x2 = *(unsigned char**)b;
    return memcmp(x1, x2, comp_size*sizeof(unsigned char));
}

// Alternative comparator that walks the bytes explicitly instead of using memcmp.
int lex_compare_2(const void * a, const void * b)
{
    unsigned char *x1 = *(unsigned char**)a;
    unsigned char *x2 = *(unsigned char**)b;
    for (int tmp_size = comp_size; tmp_size > 0; tmp_size--){
        if(!(*x1 ^ *x2)){
            x1++;
            x2++;
        }
        else if(*x1 < *x2){
            return -1;
        }
        else{
            return 1;
        }
    }
    return 0;
}

// Burrows-Wheeler transform of *bwt_in (length len) into *bwt_out.
// Note: qsort, the host-defined comparators and the host global comp_size are
// not available from device code, so this __global__ function cannot build as
// a real GPU kernel without device-side replacements.
__global__ void bwt_encode(unsigned char ** bwt_in, unsigned char ** bwt_out, int len)
{
    unsigned char ** ptr_rotations, *concat_input;
    ptr_rotations = (unsigned char**) malloc(len*sizeof(unsigned char*));
    concat_input = (unsigned char*)malloc(2*len*sizeof(unsigned char) + 1);

    // Duplicate the input so every rotation is a contiguous window of length len.
    memcpy(concat_input, *bwt_in, len*sizeof(unsigned char));
    memcpy(concat_input + len*sizeof(unsigned char), *bwt_in, len*sizeof(unsigned char));
    concat_input[2*len] = '\0';

    int i;
    for(i = 0; i < len; i++){
        ptr_rotations[i] = &(concat_input[i]);
    }

    comp_size = len;
    qsort(ptr_rotations, len, sizeof(unsigned char*), lex_compare);

    for( i = 0; i < len ; i++){
        // Last column of the sorted rotation matrix.
        (*bwt_out)[i] = *(ptr_rotations[i] + (len-1)*sizeof(unsigned char));
        if(ptr_rotations[i] == concat_input){
            // Index of the original string, stored as three bytes after the output.
            (*bwt_out)[len] = i/(256*256);
            (*bwt_out)[len + 1] = (i%(256*256))/256;
            (*bwt_out)[len + 2] = (i%(256*256))%256;
        }
    }

    // Release the buffers allocated with malloc above.
    free(concat_input);
    concat_input = NULL;
    free(ptr_rotations);
    ptr_rotations = NULL;
    return;
}
ed4bc6824d007aad5e2519f426fd5e8d688a0b5e.cu
#include "bwt.h" #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cuda.h> int comp_size = 1; int lex_compare(const void * a, const void * b) { unsigned char *x1 = *(unsigned char**)a; unsigned char *x2 = *(unsigned char**)b; return memcmp(x1, x2, comp_size*sizeof(unsigned char)); } int lex_compare_2(const void * a, const void * b) { unsigned char *x1 = *(unsigned char**)a; unsigned char *x2 = *(unsigned char**)b; for (int tmp_size = comp_size; tmp_size > 0; tmp_size--){ if(!(*x1 ^ *x2)){ x1++; x2++; } else if(*x1 < *x2){ return -1; } else{ return 1; } } return 0; } void __global__ bwt_encode(unsigned char ** bwt_in, unsigned char ** bwt_out, int len) { unsigned char ** ptr_rotations, *concat_input; ptr_rotations = (unsigned char**) malloc(len*sizeof(unsigned char*)); concat_input = (unsigned char*)malloc(2*len*sizeof(unsigned char) + 1); memcpy(concat_input, *bwt_in, len*sizeof(unsigned char)); memcpy(concat_input + len*sizeof(unsigned char), *bwt_in, len*sizeof(unsigned char)); concat_input[2*len] = '\0'; int i; for(i = 0; i < len; i++){ ptr_rotations[i] = &(concat_input[i]); } comp_size = len; qsort(ptr_rotations, len, sizeof(unsigned char*), lex_compare); for( i = 0; i < len ; i++){ (*bwt_out)[i] = *(ptr_rotations[i] + (len-1)*sizeof(unsigned char)); if(ptr_rotations[i] == concat_input){ (*bwt_out)[len] = i/(256*256); (*bwt_out)[len + 1] = (i%(256*256))/256; (*bwt_out)[len + 2] = (i%(256*256))%256; } } cudafree(concat_input); concat_input = NULL; cudafree(ptr_rotations); ptr_rotations = NULL; return; }
ea944efae98dfe70428dc6b91ca81c3854ce4d7f.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <unistd.h>
#include <signal.h>

#include "NeuralNet.cuh"

sig_atomic_t volatile g_running = 1;

void sig_handler(int signum)
{
    if (signum == SIGINT)
        g_running = 0;
}

__global__ void add_input_spikes(NeuralNet *elem)
{
    return;
}

__global__ void push_spikes(NeuralNet *elem)
{
    return;
}

__global__ void generate_spikes(NeuralNet *elem)
{
    return;
}

void launch_add_input_spikes(NeuralNet *elem)
{
    hipLaunchKernelGGL(( add_input_spikes), dim3(1), dim3(1) , 0, 0, elem);
    hipDeviceSynchronize();
}

void launch_push_spikes(NeuralNet *elem)
{
    hipLaunchKernelGGL(( push_spikes), dim3(1), dim3(1) , 0, 0, elem);
    hipDeviceSynchronize();
}

void launch_generate_spikes(NeuralNet *elem)
{
    hipLaunchKernelGGL(( generate_spikes), dim3(1), dim3(1) , 0, 0, elem);
    hipDeviceSynchronize();
}

int main(int argc, char **argv)
{
    NeuralNet* neuralNet = new NeuralNet(5);

    // ToDo build up neural net here
    neuralNet->initThreadBlocks();

    signal(SIGINT, &sig_handler);

    while (g_running) {
        // neuralNet->updateActivity();
        neuralNet->trial();
        // neuralNet->getActivity();
        // launch_add_input_spikes(neuralNet);
        // launch_push_spikes(neuralNet);
        // launch_generate_spikes(neuralNet);
    }

    printf("exiting safely\n");
    // printf("On host (after by-pointer): name=%s, value=%d\n", e->name.c_str(), e->value);

    delete neuralNet;
    hipDeviceReset();
    return 0;
}
ea944efae98dfe70428dc6b91ca81c3854ce4d7f.cu
#include <cuda_runtime.h>
#include <stdio.h>
#include <unistd.h>
#include <signal.h>

#include "NeuralNet.cuh"

sig_atomic_t volatile g_running = 1;

void sig_handler(int signum)
{
    if (signum == SIGINT)
        g_running = 0;
}

__global__ void add_input_spikes(NeuralNet *elem)
{
    return;
}

__global__ void push_spikes(NeuralNet *elem)
{
    return;
}

__global__ void generate_spikes(NeuralNet *elem)
{
    return;
}

void launch_add_input_spikes(NeuralNet *elem)
{
    add_input_spikes<<< 1, 1 >>>(elem);
    cudaDeviceSynchronize();
}

void launch_push_spikes(NeuralNet *elem)
{
    push_spikes<<< 1, 1 >>>(elem);
    cudaDeviceSynchronize();
}

void launch_generate_spikes(NeuralNet *elem)
{
    generate_spikes<<< 1, 1 >>>(elem);
    cudaDeviceSynchronize();
}

int main(int argc, char **argv)
{
    NeuralNet* neuralNet = new NeuralNet(5);

    // ToDo build up neural net here
    neuralNet->initThreadBlocks();

    signal(SIGINT, &sig_handler);

    while (g_running) {
        // neuralNet->updateActivity();
        neuralNet->trial();
        // neuralNet->getActivity();
        // launch_add_input_spikes(neuralNet);
        // launch_push_spikes(neuralNet);
        // launch_generate_spikes(neuralNet);
    }

    printf("exiting safely\n");
    // printf("On host (after by-pointer): name=%s, value=%d\n", e->name.c_str(), e->value);

    delete neuralNet;
    cudaDeviceReset();
    return 0;
}
simple-grid.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// N, A, B and C are assumed to be defined/allocated elsewhere.

//Kernel definition (notice the use of single-precision!)
__global__ void MatAdd(float A[N][N], float B[N][N], float C[N][N])
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    if (i < N && j < N) {
        C[i][j] = A[i][j] + B[i][j];
    }
}

int main()
{
    //stuff

    //Kernel invocation
    //each block has 16x16 threads
    dim3 threadsPerBlock(16, 16);
    dim3 numBlocks(N/threadsPerBlock.x, N/threadsPerBlock.y);
    hipLaunchKernelGGL(( MatAdd), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, A, B, C);
}
simple-grid.cu
// N, A, B and C are assumed to be defined/allocated elsewhere.

//Kernel definition (notice the use of single-precision!)
__global__ void MatAdd(float A[N][N], float B[N][N], float C[N][N])
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    if (i < N && j < N) {
        C[i][j] = A[i][j] + B[i][j];
    }
}

int main()
{
    //stuff

    //Kernel invocation
    //each block has 16x16 threads
    dim3 threadsPerBlock(16, 16);
    dim3 numBlocks(N/threadsPerBlock.x, N/threadsPerBlock.y);
    MatAdd<<<numBlocks, threadsPerBlock>>>(A, B, C);
}
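The simple-grid pair above stubs out the host side with //stuff. Purely as an illustration of one way that stub could be completed (it is not part of the original pair), the sketch below allocates the matrices, launches the kernel with the same 16x16 block shape, and copies the result back. The value N = 1024, the buffer names hA/hB/hC and rawA/rawB/rawC, and the omission of error checking are assumptions made only for this sketch.

// Illustrative, self-contained host driver for the MatAdd kernel (assumed N = 1024).
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

#define N 1024

__global__ void MatAdd(float A[N][N], float B[N][N], float C[N][N])
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    if (i < N && j < N) {
        C[i][j] = A[i][j] + B[i][j];
    }
}

int main()
{
    size_t bytes = (size_t)N * N * sizeof(float);

    // Host buffers with simple test data.
    float *hA = (float*)malloc(bytes), *hB = (float*)malloc(bytes), *hC = (float*)malloc(bytes);
    for (int k = 0; k < N * N; ++k) { hA[k] = 1.0f; hB[k] = 2.0f; }

    // Device buffers; the raw pointers are reinterpreted as N-column 2D arrays
    // so they match the kernel's float[N][N] parameters.
    float *rawA, *rawB, *rawC;
    cudaMalloc((void**)&rawA, bytes);
    cudaMalloc((void**)&rawB, bytes);
    cudaMalloc((void**)&rawC, bytes);
    cudaMemcpy(rawA, hA, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(rawB, hB, bytes, cudaMemcpyHostToDevice);

    // Same launch configuration as in the snippet above: 16x16 threads per block.
    dim3 threadsPerBlock(16, 16);
    dim3 numBlocks(N / threadsPerBlock.x, N / threadsPerBlock.y);
    MatAdd<<<numBlocks, threadsPerBlock>>>((float (*)[N])rawA, (float (*)[N])rawB, (float (*)[N])rawC);

    cudaMemcpy(hC, rawC, bytes, cudaMemcpyDeviceToHost);
    printf("C[0][0] = %f\n", hC[0]);   // expected: 3.000000

    cudaFree(rawA); cudaFree(rawB); cudaFree(rawC);
    free(hA); free(hB); free(hC);
    return 0;
}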
9efcb2e8bbb8b405602ba56de2d2ca208015b19e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <array> #include <climits> #include <cmath> #include <cstdio> #include <iostream> #include <iterator> #include <sstream> #include <omp.h> #ifdef HAVE_CUB #include <hipcub/hipcub.hpp> #endif // HAVE_CUB #ifdef USE_NVTX #include <roctracer/roctx.h> const uint32_t colors[] = {0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff}; const int num_colors = sizeof(colors) / sizeof(uint32_t); #define PUSH_RANGE(name, cid) \ { \ int color_id = cid; \ color_id = color_id % num_colors; \ nvtxEventAttributes_t eventAttrib = {0}; \ eventAttrib.version = NVTX_VERSION; \ eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \ eventAttrib.colorType = NVTX_COLOR_ARGB; \ eventAttrib.color = colors[color_id]; \ eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \ eventAttrib.message.ascii = name; \ nvtxRangePushEx(&eventAttrib); \ } #define POP_RANGE roctxRangePop(); #else #define PUSH_RANGE(name, cid) #define POP_RANGE #endif #define CUDA_RT_CALL(call) \ { \ hipError_t cudaStatus = call; \ if (hipSuccess != cudaStatus) \ fprintf(stderr, \ "ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \ "with " \ "%s (%d).\n", \ #call, __LINE__, __FILE__, hipGetErrorString(cudaStatus), cudaStatus); \ } typedef float real; constexpr real tol = 1.0e-8; const real PI = 2.0 * std::asin(1.0); __global__ void initialize_boundaries(real* __restrict__ const a_new, real* __restrict__ const a, const real pi, const int nx, const int ny) { for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < ny; iy += blockDim.x * gridDim.x) { const real y0 = sin(2.0 * pi * iy / (ny - 1)); a[iy * nx + 0] = y0; a[iy * nx + (nx - 1)] = y0; a_new[iy * nx + 0] = y0; a_new[iy * nx + (nx - 1)] = y0; } } template <int BLOCK_DIM_X, int BLOCK_DIM_Y> __global__ void jacobi_kernel(real* __restrict__ const a_new, const real* __restrict__ const a, real* __restrict__ const l2_norm, const int iy_start, const int iy_end, const int nx) { #ifdef HAVE_CUB typedef hipcub::BlockReduce<real, BLOCK_DIM_X, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; #endif // HAVE_CUB const int iy = blockIdx.y * blockDim.y + threadIdx.y + 1; const int ix = blockIdx.x * blockDim.x + threadIdx.x; real local_l2_norm = 0.0; if (iy < iy_end) { if (ix >= 1 && ix < (nx - 1)) { const real new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] + a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]); a_new[iy * nx + ix] = new_val; // apply boundary conditions if (iy_start == iy) { a_new[iy_end * nx + ix] = new_val; } if ((iy_end - 1) == iy) { a_new[(iy_start - 1) * nx + ix] = new_val; } real residue = new_val - a[iy * nx + ix]; local_l2_norm = residue * residue; } } #ifdef HAVE_CUB real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm); if (0 == threadIdx.y && 0 == threadIdx.x) atomicAdd(l2_norm, block_l2_norm); #else atomicAdd(l2_norm, local_l2_norm); #endif // HAVE_CUB } double noopt(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print); template <typename T> T get_argval(char** begin, char** end, const std::string& arg, const T default_val) { T argval = default_val; char** itr = std::find(begin, end, arg); if (itr != end && ++itr != end) { std::istringstream inbuf(*itr); inbuf >> argval; } return argval; } bool get_arg(char** begin, char** end, const std::string& arg) { char** itr = std::find(begin, end, 
arg); if (itr != end) { return true; } return false; } struct l2_norm_buf { hipEvent_t copy_done; real* d; real* h; }; int main(int argc, char* argv[]) { const int iter_max = get_argval<int>(argv, argv + argc, "-niter", 1000); const int nccheck = get_argval<int>(argv, argv + argc, "-nccheck", 1); const int nx = get_argval<int>(argv, argv + argc, "-nx", 7168); const int ny = get_argval<int>(argv, argv + argc, "-ny", 7168); const bool csv = get_arg(argv, argv + argc, "-csv"); if (nccheck != 1) { fprintf(stderr, "Only nccheck = 1 is supported\n"); return -1; } real* a; real* a_new; hipStream_t compute_stream; hipStream_t copy_l2_norm_stream; hipStream_t reset_l2_norm_stream; hipEvent_t compute_done; hipEvent_t reset_l2_norm_done[2]; real l2_norms[2]; l2_norm_buf l2_norm_bufs[2]; int iy_start = 1; int iy_end = (ny - 1); CUDA_RT_CALL(hipSetDevice(0)); CUDA_RT_CALL(hipFree(0)); CUDA_RT_CALL(hipMalloc(&a, nx * ny * sizeof(real))); CUDA_RT_CALL(hipMalloc(&a_new, nx * ny * sizeof(real))); CUDA_RT_CALL(hipMemset(a, 0, nx * ny * sizeof(real))); CUDA_RT_CALL(hipMemset(a_new, 0, nx * ny * sizeof(real))); // Set diriclet boundary conditions on left and right boarder hipLaunchKernelGGL(( initialize_boundaries), dim3(ny / 128 + 1), dim3(128), 0, 0, a, a_new, PI, nx, ny); CUDA_RT_CALL(hipGetLastError()); CUDA_RT_CALL(hipDeviceSynchronize()); CUDA_RT_CALL(hipStreamCreate(&compute_stream)); CUDA_RT_CALL(hipStreamCreate(&copy_l2_norm_stream)); CUDA_RT_CALL(hipStreamCreate(&reset_l2_norm_stream)); CUDA_RT_CALL(hipEventCreateWithFlags(&compute_done, hipEventDisableTiming)); CUDA_RT_CALL(hipEventCreateWithFlags(&reset_l2_norm_done[0], hipEventDisableTiming)); CUDA_RT_CALL(hipEventCreateWithFlags(&reset_l2_norm_done[1], hipEventDisableTiming)); for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(hipEventCreateWithFlags(&l2_norm_bufs[i].copy_done, hipEventDisableTiming)); CUDA_RT_CALL(hipMalloc(&l2_norm_bufs[i].d, sizeof(real))); CUDA_RT_CALL(hipMemset(l2_norm_bufs[i].d, 0, sizeof(real))); CUDA_RT_CALL(hipHostMalloc(&l2_norm_bufs[i].h, sizeof(real))); (*l2_norm_bufs[i].h) = 1.0; } CUDA_RT_CALL(hipDeviceSynchronize()); if (!csv) printf( "Jacobi relaxation: %d iterations on %d x %d mesh with norm check " "every %d iterations\n", iter_max, ny, nx, nccheck); constexpr int dim_block_x = 32; constexpr int dim_block_y = 4; dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x, (ny + dim_block_y - 1) / dim_block_y, 1); int iter = 0; for (int i = 0; i < 2; ++i) { l2_norms[i] = 0.0; } double start = omp_get_wtime(); PUSH_RANGE("Jacobi solve", 0) bool l2_norm_greater_than_tol = true; while (l2_norm_greater_than_tol && iter < iter_max) { // on new iteration: old current vars are now previous vars, old // previous vars are no longer needed int prev = iter % 2; int curr = (iter + 1) % 2; // wait for memset from old previous iteration to complete CUDA_RT_CALL(hipStreamWaitEvent(compute_stream, reset_l2_norm_done[curr], 0)); hipLaunchKernelGGL(( jacobi_kernel<dim_block_x, dim_block_y>) , dim3(dim_grid), dim3({dim_block_x), dim_block_y, 1}, 0, compute_stream, a_new, a, l2_norm_bufs[curr].d, iy_start, iy_end, nx); CUDA_RT_CALL(hipGetLastError()); CUDA_RT_CALL(hipEventRecord(compute_done, compute_stream)); // perform L2 norm calculation if ((iter % nccheck) == 0 || (!csv && (iter % 100) == 0)) { CUDA_RT_CALL(hipStreamWaitEvent(copy_l2_norm_stream, compute_done, 0)); CUDA_RT_CALL(hipMemcpyAsync(l2_norm_bufs[curr].h, l2_norm_bufs[curr].d, sizeof(real), hipMemcpyDeviceToHost, copy_l2_norm_stream)); 
CUDA_RT_CALL(hipEventRecord(l2_norm_bufs[curr].copy_done, copy_l2_norm_stream)); // make sure D2H copy is complete before using the data for // calculation CUDA_RT_CALL(hipEventSynchronize(l2_norm_bufs[prev].copy_done)); l2_norms[prev] = *(l2_norm_bufs[prev].h); l2_norms[prev] = std::sqrt(l2_norms[prev]); l2_norm_greater_than_tol = (l2_norms[prev] > tol); if (!csv && (iter % 100) == 0) { printf("%5d, %0.6f\n", iter, l2_norms[prev]); } // reset everything for next iteration l2_norms[prev] = 0.0; *(l2_norm_bufs[prev].h) = 0.0; CUDA_RT_CALL( hipMemsetAsync(l2_norm_bufs[prev].d, 0, sizeof(real), reset_l2_norm_stream)); CUDA_RT_CALL(hipEventRecord(reset_l2_norm_done[prev], reset_l2_norm_stream)); } std::swap(a_new, a); iter++; } CUDA_RT_CALL(hipDeviceSynchronize()); POP_RANGE double stop = omp_get_wtime(); if (csv) { printf("single_gpu, %d, %d, %d, %d, %f\n", nx, ny, iter_max, nccheck, (stop - start)); } else { printf("%dx%d: 1 GPU: %8.4f s\n", ny, nx, (stop - start)); } for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(hipHostFree(l2_norm_bufs[i].h)); CUDA_RT_CALL(hipFree(l2_norm_bufs[i].d)); CUDA_RT_CALL(hipEventDestroy(l2_norm_bufs[i].copy_done)); } CUDA_RT_CALL(hipEventDestroy(reset_l2_norm_done[1])); CUDA_RT_CALL(hipEventDestroy(reset_l2_norm_done[0])); CUDA_RT_CALL(hipEventDestroy(compute_done)); CUDA_RT_CALL(hipStreamDestroy(reset_l2_norm_stream)); CUDA_RT_CALL(hipStreamDestroy(copy_l2_norm_stream)); CUDA_RT_CALL(hipStreamDestroy(compute_stream)); CUDA_RT_CALL(hipFree(a_new)); CUDA_RT_CALL(hipFree(a)); return 0; }
9efcb2e8bbb8b405602ba56de2d2ca208015b19e.cu
#include <algorithm> #include <array> #include <climits> #include <cmath> #include <cstdio> #include <iostream> #include <iterator> #include <sstream> #include <omp.h> #ifdef HAVE_CUB #include <cub/block/block_reduce.cuh> #endif // HAVE_CUB #ifdef USE_NVTX #include <nvToolsExt.h> const uint32_t colors[] = {0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff}; const int num_colors = sizeof(colors) / sizeof(uint32_t); #define PUSH_RANGE(name, cid) \ { \ int color_id = cid; \ color_id = color_id % num_colors; \ nvtxEventAttributes_t eventAttrib = {0}; \ eventAttrib.version = NVTX_VERSION; \ eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \ eventAttrib.colorType = NVTX_COLOR_ARGB; \ eventAttrib.color = colors[color_id]; \ eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \ eventAttrib.message.ascii = name; \ nvtxRangePushEx(&eventAttrib); \ } #define POP_RANGE nvtxRangePop(); #else #define PUSH_RANGE(name, cid) #define POP_RANGE #endif #define CUDA_RT_CALL(call) \ { \ cudaError_t cudaStatus = call; \ if (cudaSuccess != cudaStatus) \ fprintf(stderr, \ "ERROR: CUDA RT call \"%s\" in line %d of file %s failed " \ "with " \ "%s (%d).\n", \ #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus); \ } typedef float real; constexpr real tol = 1.0e-8; const real PI = 2.0 * std::asin(1.0); __global__ void initialize_boundaries(real* __restrict__ const a_new, real* __restrict__ const a, const real pi, const int nx, const int ny) { for (int iy = blockIdx.x * blockDim.x + threadIdx.x; iy < ny; iy += blockDim.x * gridDim.x) { const real y0 = sin(2.0 * pi * iy / (ny - 1)); a[iy * nx + 0] = y0; a[iy * nx + (nx - 1)] = y0; a_new[iy * nx + 0] = y0; a_new[iy * nx + (nx - 1)] = y0; } } template <int BLOCK_DIM_X, int BLOCK_DIM_Y> __global__ void jacobi_kernel(real* __restrict__ const a_new, const real* __restrict__ const a, real* __restrict__ const l2_norm, const int iy_start, const int iy_end, const int nx) { #ifdef HAVE_CUB typedef cub::BlockReduce<real, BLOCK_DIM_X, cub::BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_DIM_Y> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; #endif // HAVE_CUB const int iy = blockIdx.y * blockDim.y + threadIdx.y + 1; const int ix = blockIdx.x * blockDim.x + threadIdx.x; real local_l2_norm = 0.0; if (iy < iy_end) { if (ix >= 1 && ix < (nx - 1)) { const real new_val = 0.25 * (a[iy * nx + ix + 1] + a[iy * nx + ix - 1] + a[(iy + 1) * nx + ix] + a[(iy - 1) * nx + ix]); a_new[iy * nx + ix] = new_val; // apply boundary conditions if (iy_start == iy) { a_new[iy_end * nx + ix] = new_val; } if ((iy_end - 1) == iy) { a_new[(iy_start - 1) * nx + ix] = new_val; } real residue = new_val - a[iy * nx + ix]; local_l2_norm = residue * residue; } } #ifdef HAVE_CUB real block_l2_norm = BlockReduce(temp_storage).Sum(local_l2_norm); if (0 == threadIdx.y && 0 == threadIdx.x) atomicAdd(l2_norm, block_l2_norm); #else atomicAdd(l2_norm, local_l2_norm); #endif // HAVE_CUB } double noopt(const int nx, const int ny, const int iter_max, real* const a_ref_h, const int nccheck, const bool print); template <typename T> T get_argval(char** begin, char** end, const std::string& arg, const T default_val) { T argval = default_val; char** itr = std::find(begin, end, arg); if (itr != end && ++itr != end) { std::istringstream inbuf(*itr); inbuf >> argval; } return argval; } bool get_arg(char** begin, char** end, const std::string& arg) { char** itr = std::find(begin, end, arg); if (itr != end) { return true; } return false; } struct l2_norm_buf { cudaEvent_t 
copy_done; real* d; real* h; }; int main(int argc, char* argv[]) { const int iter_max = get_argval<int>(argv, argv + argc, "-niter", 1000); const int nccheck = get_argval<int>(argv, argv + argc, "-nccheck", 1); const int nx = get_argval<int>(argv, argv + argc, "-nx", 7168); const int ny = get_argval<int>(argv, argv + argc, "-ny", 7168); const bool csv = get_arg(argv, argv + argc, "-csv"); if (nccheck != 1) { fprintf(stderr, "Only nccheck = 1 is supported\n"); return -1; } real* a; real* a_new; cudaStream_t compute_stream; cudaStream_t copy_l2_norm_stream; cudaStream_t reset_l2_norm_stream; cudaEvent_t compute_done; cudaEvent_t reset_l2_norm_done[2]; real l2_norms[2]; l2_norm_buf l2_norm_bufs[2]; int iy_start = 1; int iy_end = (ny - 1); CUDA_RT_CALL(cudaSetDevice(0)); CUDA_RT_CALL(cudaFree(0)); CUDA_RT_CALL(cudaMalloc(&a, nx * ny * sizeof(real))); CUDA_RT_CALL(cudaMalloc(&a_new, nx * ny * sizeof(real))); CUDA_RT_CALL(cudaMemset(a, 0, nx * ny * sizeof(real))); CUDA_RT_CALL(cudaMemset(a_new, 0, nx * ny * sizeof(real))); // Set diriclet boundary conditions on left and right boarder initialize_boundaries<<<ny / 128 + 1, 128>>>(a, a_new, PI, nx, ny); CUDA_RT_CALL(cudaGetLastError()); CUDA_RT_CALL(cudaDeviceSynchronize()); CUDA_RT_CALL(cudaStreamCreate(&compute_stream)); CUDA_RT_CALL(cudaStreamCreate(&copy_l2_norm_stream)); CUDA_RT_CALL(cudaStreamCreate(&reset_l2_norm_stream)); CUDA_RT_CALL(cudaEventCreateWithFlags(&compute_done, cudaEventDisableTiming)); CUDA_RT_CALL(cudaEventCreateWithFlags(&reset_l2_norm_done[0], cudaEventDisableTiming)); CUDA_RT_CALL(cudaEventCreateWithFlags(&reset_l2_norm_done[1], cudaEventDisableTiming)); for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(cudaEventCreateWithFlags(&l2_norm_bufs[i].copy_done, cudaEventDisableTiming)); CUDA_RT_CALL(cudaMalloc(&l2_norm_bufs[i].d, sizeof(real))); CUDA_RT_CALL(cudaMemset(l2_norm_bufs[i].d, 0, sizeof(real))); CUDA_RT_CALL(cudaMallocHost(&l2_norm_bufs[i].h, sizeof(real))); (*l2_norm_bufs[i].h) = 1.0; } CUDA_RT_CALL(cudaDeviceSynchronize()); if (!csv) printf( "Jacobi relaxation: %d iterations on %d x %d mesh with norm check " "every %d iterations\n", iter_max, ny, nx, nccheck); constexpr int dim_block_x = 32; constexpr int dim_block_y = 4; dim3 dim_grid((nx + dim_block_x - 1) / dim_block_x, (ny + dim_block_y - 1) / dim_block_y, 1); int iter = 0; for (int i = 0; i < 2; ++i) { l2_norms[i] = 0.0; } double start = omp_get_wtime(); PUSH_RANGE("Jacobi solve", 0) bool l2_norm_greater_than_tol = true; while (l2_norm_greater_than_tol && iter < iter_max) { // on new iteration: old current vars are now previous vars, old // previous vars are no longer needed int prev = iter % 2; int curr = (iter + 1) % 2; // wait for memset from old previous iteration to complete CUDA_RT_CALL(cudaStreamWaitEvent(compute_stream, reset_l2_norm_done[curr], 0)); jacobi_kernel<dim_block_x, dim_block_y> <<<dim_grid, {dim_block_x, dim_block_y, 1}, 0, compute_stream>>>( a_new, a, l2_norm_bufs[curr].d, iy_start, iy_end, nx); CUDA_RT_CALL(cudaGetLastError()); CUDA_RT_CALL(cudaEventRecord(compute_done, compute_stream)); // perform L2 norm calculation if ((iter % nccheck) == 0 || (!csv && (iter % 100) == 0)) { CUDA_RT_CALL(cudaStreamWaitEvent(copy_l2_norm_stream, compute_done, 0)); CUDA_RT_CALL(cudaMemcpyAsync(l2_norm_bufs[curr].h, l2_norm_bufs[curr].d, sizeof(real), cudaMemcpyDeviceToHost, copy_l2_norm_stream)); CUDA_RT_CALL(cudaEventRecord(l2_norm_bufs[curr].copy_done, copy_l2_norm_stream)); // make sure D2H copy is complete before using the data for // calculation 
CUDA_RT_CALL(cudaEventSynchronize(l2_norm_bufs[prev].copy_done)); l2_norms[prev] = *(l2_norm_bufs[prev].h); l2_norms[prev] = std::sqrt(l2_norms[prev]); l2_norm_greater_than_tol = (l2_norms[prev] > tol); if (!csv && (iter % 100) == 0) { printf("%5d, %0.6f\n", iter, l2_norms[prev]); } // reset everything for next iteration l2_norms[prev] = 0.0; *(l2_norm_bufs[prev].h) = 0.0; CUDA_RT_CALL( cudaMemsetAsync(l2_norm_bufs[prev].d, 0, sizeof(real), reset_l2_norm_stream)); CUDA_RT_CALL(cudaEventRecord(reset_l2_norm_done[prev], reset_l2_norm_stream)); } std::swap(a_new, a); iter++; } CUDA_RT_CALL(cudaDeviceSynchronize()); POP_RANGE double stop = omp_get_wtime(); if (csv) { printf("single_gpu, %d, %d, %d, %d, %f\n", nx, ny, iter_max, nccheck, (stop - start)); } else { printf("%dx%d: 1 GPU: %8.4f s\n", ny, nx, (stop - start)); } for (int i = 0; i < 2; ++i) { CUDA_RT_CALL(cudaFreeHost(l2_norm_bufs[i].h)); CUDA_RT_CALL(cudaFree(l2_norm_bufs[i].d)); CUDA_RT_CALL(cudaEventDestroy(l2_norm_bufs[i].copy_done)); } CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[1])); CUDA_RT_CALL(cudaEventDestroy(reset_l2_norm_done[0])); CUDA_RT_CALL(cudaEventDestroy(compute_done)); CUDA_RT_CALL(cudaStreamDestroy(reset_l2_norm_stream)); CUDA_RT_CALL(cudaStreamDestroy(copy_l2_norm_stream)); CUDA_RT_CALL(cudaStreamDestroy(compute_stream)); CUDA_RT_CALL(cudaFree(a_new)); CUDA_RT_CALL(cudaFree(a)); return 0; }
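For reference, the central rewrite in each pair of this file is the kernel launch: hipify replaces CUDA's triple-chevron syntax with the hipLaunchKernelGGL macro, whose arguments are grid, block, dynamic shared-memory bytes, stream, and then the kernel arguments. A short sketch of the mapping, using the Jacobi launch above as the concrete case (a template kernel's name must be parenthesized, or wrapped in HIP_KERNEL_NAME, so the comma inside its template argument list is not treated as a macro argument separator):

// CUDA:
//   jacobi_kernel<dim_block_x, dim_block_y>
//       <<<dim_grid, {dim_block_x, dim_block_y, 1}, 0, compute_stream>>>(
//           a_new, a, l2_norm_bufs[curr].d, iy_start, iy_end, nx);
// HIP equivalent:
hipLaunchKernelGGL((jacobi_kernel<dim_block_x, dim_block_y>), dim_grid,
                   dim3(dim_block_x, dim_block_y, 1), 0, compute_stream,
                   a_new, a, l2_norm_bufs[curr].d, iy_start, iy_end, nx);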
484df58aa5638e4f55f0a094d1d59dbf53922a98.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <math.h> __global__ void reduce_kernel(float *in, float *out) { // TODO: write your code here } __host__ void init_vec(float *h_in, int ntot) { for(int i = 0 ; i < ntot ; i++) { h_in[i] = sinf(float(i)); } } __host__ void verif(float sum, float *h_in, int ntot) { float sum_res = 0.; for(int i = 0 ; i < ntot ; i++) { sum_res += h_in[i]; } float err = fabsf((sum - sum_res)/sum); printf("GPU sum : %.4e\n", sum); printf("CPU sum : %.4e\n", sum_res); if (err < 1.e-4) { printf("TEST PASSED (err %.4e < 1.e-4).\n", err); } else { printf("TEST FAILED (err %.4e > 1.e-4).\n", err); } } int main(int argc, char **argv) { float sum; int nthreads, nblocks, ntot; nthreads = 128; ntot = atoi(argv[1]); nblocks = (ntot + nthreads - 1) / nthreads; printf("Ntot : %d\n", ntot); printf("nthreads : %d\n", nthreads); printf("nblocks : %d\n", nblocks); float *d_sum, *d_bl, *d_in, *h_in; h_in = (float*)malloc(ntot*sizeof(float)); hipMalloc((void**)&d_sum, sizeof(float)); hipMalloc((void**)&d_bl, nblocks*sizeof(float)); hipMalloc((void**)&d_in, ntot*sizeof(float)); init_vec(h_in, ntot); hipMemcpy(d_in, h_in, ntot*sizeof(float), hipMemcpyHostToDevice); // TODO: the reduction of d_in takes place here; the result is obtained in *d_sum hipMemcpy(&sum, d_sum, sizeof(float), hipMemcpyDeviceToHost); verif(sum, h_in, ntot); hipFree(d_sum); hipFree(d_bl); hipFree(d_in); free(h_in); return 0; }
484df58aa5638e4f55f0a094d1d59dbf53922a98.cu
#include <stdlib.h> #include <stdio.h> #include <math.h> __global__ void reduce_kernel(float *in, float *out) { // TODO: write your code here } __host__ void init_vec(float *h_in, int ntot) { for(int i = 0 ; i < ntot ; i++) { h_in[i] = sinf(float(i)); } } __host__ void verif(float sum, float *h_in, int ntot) { float sum_res = 0.; for(int i = 0 ; i < ntot ; i++) { sum_res += h_in[i]; } float err = fabsf((sum - sum_res)/sum); printf("GPU sum : %.4e\n", sum); printf("CPU sum : %.4e\n", sum_res); if (err < 1.e-4) { printf("TEST PASSED (err %.4e < 1.e-4).\n", err); } else { printf("TEST FAILED (err %.4e > 1.e-4).\n", err); } } int main(int argc, char **argv) { float sum; int nthreads, nblocks, ntot; nthreads = 128; ntot = atoi(argv[1]); nblocks = (ntot + nthreads - 1) / nthreads; printf("Ntot : %d\n", ntot); printf("nthreads : %d\n", nthreads); printf("nblocks : %d\n", nblocks); float *d_sum, *d_bl, *d_in, *h_in; h_in = (float*)malloc(ntot*sizeof(float)); cudaMalloc((void**)&d_sum, sizeof(float)); cudaMalloc((void**)&d_bl, nblocks*sizeof(float)); cudaMalloc((void**)&d_in, ntot*sizeof(float)); init_vec(h_in, ntot); cudaMemcpy(d_in, h_in, ntot*sizeof(float), cudaMemcpyHostToDevice); // TODO: the reduction of d_in takes place here; the result is obtained in *d_sum cudaMemcpy(&sum, d_sum, sizeof(float), cudaMemcpyDeviceToHost); verif(sum, h_in, ntot); cudaFree(d_sum); cudaFree(d_bl); cudaFree(d_in); free(h_in); return 0; }
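The reduce_kernel body in the exercise pair above is deliberately left as a TODO. Purely as an illustration (a sketch, not the exercise's reference solution; the kernel name and the explicit ntot parameter are additions of this sketch), a shared-memory tree reduction that produces one partial sum per block could look like this:

// Each block reduces its slice of `in` into one partial sum, written to
// out[blockIdx.x] (e.g. the d_bl buffer from the exercise). A second launch
// over the nblocks partials, or an atomicAdd, then yields the final *d_sum.
__global__ void reduce_kernel_sketch(const float *in, float *out, int ntot)
{
    extern __shared__ float sdata[];          // nthreads floats, sized at launch
    const int tid = threadIdx.x;
    const int i   = blockIdx.x * blockDim.x + tid;

    sdata[tid] = (i < ntot) ? in[i] : 0.0f;   // out-of-range threads contribute 0
    __syncthreads();

    // Tree reduction in shared memory; assumes blockDim.x is a power of two
    // (128 in the exercise above).
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }

    if (tid == 0) out[blockIdx.x] = sdata[0]; // one partial sum per block
}
// Example launch: reduce_kernel_sketch<<<nblocks, nthreads, nthreads * sizeof(float)>>>(d_in, d_bl, ntot);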
7702d1b345241fadfe074aaeede2cd9b46947191.hip
// !!! This is a file automatically generated by hipify!!! #define DEBUG 0 #define FP_PRE 0.0000001 #define USAGE_MESSAGE "Usage: llsgpu <file>" #define DATA_POINTS 6 #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <stdarg.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> #include <math_functions.h> #include "util.h" #include "cudautil.h" #include "imgutil.h" #include "rngarray_kernel.cu" #include "solver_kernel.cu" #include "maxredux_kernel.cu" // RNGArray vars extern cuda_dim RNGArray_dim; uint rnds_cnt = Solver_dim.grid_dim.x * Solver_dim.block_dim.x * 6; // Solver vars extern edge_pixel_t* h_edge_pixels; // local global vars img_t in_image; void setup(int, char*[]); void process_input(img_t*); void teardown(); int main(int argc, char* argv[]) { hr_timer_t rng_time, solver_time, accum_process; int *d_results, *h_results; uint shared_mem; ellipse_t ellipse = {0, 0, 0, 0, 0}; hipError_t err; // setup the detector setup(argc,argv); // generate all of the random numbers needed timer_start(&rng_time); hipLaunchKernelGGL(( RNGArray_kernel), dim3(RNGArray_dim.grid_dim),dim3(RNGArray_dim.block_dim), 0, 0, d_rnds,rnds_cnt); err = hipDeviceSynchronize(); if ((err = hipGetLastError()) != hipSuccess) { debug("RNGArray_kernel: %s",hipGetErrorString(err)); exit(1); } timer_end(&rng_time); shared_mem = Solver_dim.block_dim.x * sizeof(ellipse_t); timer_start(&solver_time); hipLaunchKernelGGL(( Solver_kernel), dim3(Solver_dim.grid_dim),dim3(Solver_dim.block_dim),shared_mem, 0, 1, d_edge_pixels, h_edge_pixels_cnt, d_rnds, d_center_accum, d_axes_accum, d_theta_accum ); hipDeviceSynchronize(); if ((err = hipGetLastError()) != hipSuccess) { debug("Solver_kernel: %s",hipGetErrorString(err)); exit(2); } timer_end(&solver_time); h_results = new int[3]; err = hipMalloc((void **)&d_results, sizeof(int) * 2); if (err != hipSuccess) debug("d_results malloc: %s",hipGetErrorString(err)); memset(h_results,0,sizeof(int) * 3); err = hipMemset(d_results,0,sizeof(int) * 3); if (err != hipSuccess) debug("d_results memset: %s",hipGetErrorString(err)); timer_start(&accum_process); hipLaunchKernelGGL(( MaxRedux_kernel<512>), dim3(1), dim3(512), sizeof(int) * 512, 0, d_center_accum, in_image.width * in_image.height, d_results); hipDeviceSynchronize(); if ((err = hipGetLastError()) != hipSuccess) { debug("MaxRedux_kernel(1): %s",hipGetErrorString(err)); exit(3); } hipLaunchKernelGGL(( MaxRedux_kernel<512>), dim3(1), dim3(512), sizeof(int) * 512, 0, d_axes_accum, in_image.width * in_image.height, d_results+1); hipDeviceSynchronize(); if ((err = hipGetLastError()) != hipSuccess) { debug("MaxRedux_kernel(2): %s",hipGetErrorString(err)); exit(4); } hipLaunchKernelGGL(( MaxRedux_kernel<64>), dim3(1), dim3(64), sizeof(int) * THETA_ACCUM_CNT, 0, d_theta_accum, THETA_ACCUM_CNT, d_results+2); hipDeviceSynchronize(); if ((err = hipGetLastError()) != hipSuccess) { debug("MaxRedux_kernel(2): %s",hipGetErrorString(err)); exit(4); } hipMemcpy(h_results, d_results, sizeof(int) * 3, hipMemcpyDeviceToHost); if (h_results[0] > 0) { ellipse.y = (int)floorf(h_results[0] / in_image.width); ellipse.x = h_results[0] - (int)(ellipse.y * in_image.width); } if (h_results[1] > 0) { ellipse.b = (int)floor(h_results[1] / in_image.width); ellipse.a = h_results[1] - (int)(ellipse.b * in_image.width); } ellipse.theta = h_results[2]; timer_end(&accum_process); hipFree(d_results); delete [] h_results; printf("%f %f %f %f %d %d %d %d %d\n", compute_secs(&rng_time.elapsed), compute_secs(&solver_time.elapsed), 
compute_secs(&accum_process.elapsed), compute_secs(&rng_time.elapsed) + compute_secs(&solver_time.elapsed) + compute_secs(&accum_process.elapsed), ellipse.x, ellipse.y, ellipse.a, ellipse.b, ellipse.theta ); // exit cleanly //teardown(); return 0; } void setup(int argc, char* argv[]) { // check the length of the cmd line if (argc != 2) fprintf(stderr,"%s\n",USAGE_MESSAGE); // read in the input image image_read(argv[1],&in_image); // setup the RNG for our random numbers setup_RNGArray(rnds_cnt); // setup the solver process_input(&in_image); setup_Solver(); setup_Solver_accum(in_image.width,in_image.height); } void process_input(img_t* input) { int y, x, i; h_edge_pixels_cnt = 0; for (y=0; y < input->height; y++) { for(x=0; x < input->width; x++) { if (input->pixels[y][x].rgba != 0) h_edge_pixels_cnt++; } } h_edge_pixels = new edge_pixel_t[h_edge_pixels_cnt]; memset(h_edge_pixels,0,sizeof(edge_pixel_t) * h_edge_pixels_cnt); i = 0; for (y=0; y < input->height; y++) { for(x=0; x < input->width; x++) { if (input->pixels[y][x].rgba != 0) { h_edge_pixels[i].x = x; h_edge_pixels[i].y = y; i++; } } } } void teardown() { teardown_Solver_accum(); teardown_Solver(); teardown_RNGArray(); }
7702d1b345241fadfe074aaeede2cd9b46947191.cu
#define DEBUG 0 #define FP_PRE 0.0000001 #define USAGE_MESSAGE "Usage: llsgpu <file>" #define DATA_POINTS 6 #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <stdarg.h> #include <math.h> #include <time.h> #include <cuda.h> #include <math_functions.h> #include "util.h" #include "cudautil.h" #include "imgutil.h" #include "rngarray_kernel.cu" #include "solver_kernel.cu" #include "maxredux_kernel.cu" // RNGArray vars extern cuda_dim RNGArray_dim; uint rnds_cnt = Solver_dim.grid_dim.x * Solver_dim.block_dim.x * 6; // Solver vars extern edge_pixel_t* h_edge_pixels; // local global vars img_t in_image; void setup(int, char*[]); void process_input(img_t*); void teardown(); int main(int argc, char* argv[]) { hr_timer_t rng_time, solver_time, accum_process; int *d_results, *h_results; uint shared_mem; ellipse_t ellipse = {0, 0, 0, 0, 0}; cudaError_t err; // setup the detector setup(argc,argv); // generate all of the random numbers needed timer_start(&rng_time); RNGArray_kernel<<<RNGArray_dim.grid_dim,RNGArray_dim.block_dim>>>(d_rnds,rnds_cnt); err = cudaThreadSynchronize(); if ((err = cudaGetLastError()) != cudaSuccess) { debug("RNGArray_kernel: %s",cudaGetErrorString(err)); exit(1); } timer_end(&rng_time); shared_mem = Solver_dim.block_dim.x * sizeof(ellipse_t); timer_start(&solver_time); Solver_kernel<<<Solver_dim.grid_dim,Solver_dim.block_dim,shared_mem>>>(1, d_edge_pixels, h_edge_pixels_cnt, d_rnds, d_center_accum, d_axes_accum, d_theta_accum ); cudaThreadSynchronize(); if ((err = cudaGetLastError()) != cudaSuccess) { debug("Solver_kernel: %s",cudaGetErrorString(err)); exit(2); } timer_end(&solver_time); h_results = new int[3]; err = cudaMalloc((void **)&d_results, sizeof(int) * 2); if (err != cudaSuccess) debug("d_results malloc: %s",cudaGetErrorString(err)); memset(h_results,0,sizeof(int) * 3); err = cudaMemset(d_results,0,sizeof(int) * 3); if (err != cudaSuccess) debug("d_results memset: %s",cudaGetErrorString(err)); timer_start(&accum_process); MaxRedux_kernel<512><<<1, 512, sizeof(int) * 512>>>(d_center_accum, in_image.width * in_image.height, d_results); cudaThreadSynchronize(); if ((err = cudaGetLastError()) != cudaSuccess) { debug("MaxRedux_kernel(1): %s",cudaGetErrorString(err)); exit(3); } MaxRedux_kernel<512><<<1, 512, sizeof(int) * 512>>>(d_axes_accum, in_image.width * in_image.height, d_results+1); cudaThreadSynchronize(); if ((err = cudaGetLastError()) != cudaSuccess) { debug("MaxRedux_kernel(2): %s",cudaGetErrorString(err)); exit(4); } MaxRedux_kernel<64><<<1, 64, sizeof(int) * THETA_ACCUM_CNT>>>(d_theta_accum, THETA_ACCUM_CNT, d_results+2); cudaThreadSynchronize(); if ((err = cudaGetLastError()) != cudaSuccess) { debug("MaxRedux_kernel(2): %s",cudaGetErrorString(err)); exit(4); } cudaMemcpy(h_results, d_results, sizeof(int) * 3, cudaMemcpyDeviceToHost); if (h_results[0] > 0) { ellipse.y = (int)floorf(h_results[0] / in_image.width); ellipse.x = h_results[0] - (int)(ellipse.y * in_image.width); } if (h_results[1] > 0) { ellipse.b = (int)floor(h_results[1] / in_image.width); ellipse.a = h_results[1] - (int)(ellipse.b * in_image.width); } ellipse.theta = h_results[2]; timer_end(&accum_process); cudaFree(d_results); delete [] h_results; printf("%f %f %f %f %d %d %d %d %d\n", compute_secs(&rng_time.elapsed), compute_secs(&solver_time.elapsed), compute_secs(&accum_process.elapsed), compute_secs(&rng_time.elapsed) + compute_secs(&solver_time.elapsed) + compute_secs(&accum_process.elapsed), ellipse.x, ellipse.y, ellipse.a, ellipse.b, ellipse.theta ); // exit cleanly 
//teardown(); return 0; } void setup(int argc, char* argv[]) { // check the length of the cmd line if (argc != 2) fprintf(stderr,"%s\n",USAGE_MESSAGE); // read in the input image image_read(argv[1],&in_image); // setup the RNG for our random numbers setup_RNGArray(rnds_cnt); // setup the solver process_input(&in_image); setup_Solver(); setup_Solver_accum(in_image.width,in_image.height); } void process_input(img_t* input) { int y, x, i; h_edge_pixels_cnt = 0; for (y=0; y < input->height; y++) { for(x=0; x < input->width; x++) { if (input->pixels[y][x].rgba != 0) h_edge_pixels_cnt++; } } h_edge_pixels = new edge_pixel_t[h_edge_pixels_cnt]; memset(h_edge_pixels,0,sizeof(edge_pixel_t) * h_edge_pixels_cnt); i = 0; for (y=0; y < input->height; y++) { for(x=0; x < input->width; x++) { if (input->pixels[y][x].rgba != 0) { h_edge_pixels[i].x = x; h_edge_pixels[i].y = y; i++; } } } } void teardown() { teardown_Solver_accum(); teardown_Solver(); teardown_RNGArray(); }
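One detail worth flagging in the ellipse-detector pair above: d_results is allocated for two ints (sizeof(int) * 2), but it is then cleared, written at d_results, d_results+1 and d_results+2, and copied back as three ints, so the last slot lands past the allocation. Assuming three result slots are intended, the allocation in both versions would need to be:

err = cudaMalloc((void **)&d_results, sizeof(int) * 3);  // .cu version
err = hipMalloc((void **)&d_results, sizeof(int) * 3);   // .hip version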
ab0464664e559a7a324f238b79bd11b6cf77dc0a.hip
// !!! This is a file automatically generated by hipify!!! #include <bits/stdc++.h> #include <hip/hip_runtime.h> #include <stdlib.h> #define IFOR(v, s, e) for(int v = s; v < e; ++v) #define UFOR(v, s, e) for(unsigned v = s; v < e; v++) using namespace std; class MatrixUtility { public: void print1Dmat(double *arr, int m) { IFOR(i, 0, m) cout << arr[i] << " "; cout << '\n'; } void print2Dmat(double **arr, int m, int n) { IFOR(i, 0, m) { IFOR(j, 0, n) printf("%0.9f ", arr[i][j]); cout << '\n'; } cout << '\n'; } void init_1D_mat(double *(&arr), int n) { arr = (double *)malloc(n * sizeof(double)); } void init_2D_mat(double **(&arr), int row, int col) { arr = (double **)malloc(row * sizeof(double *)); IFOR(i, 0, row) arr[i] = (double *)malloc(col * sizeof(double)); } double **mat_add(double **(&A), double **(&B), int row, int col) { double **res = NULL; init_2D_mat(res, row, col); IFOR(i, 0, row) IFOR(j, 0, col) res[i][j] = A[i][j] + B[i][j]; return res; } double **mat_multiply(double **(&a), double **(&b), int r1, int c1, int r2, int c2) { double **c = NULL; init_2D_mat(c, r1, c2); IFOR(i, 0, r1) IFOR(j, 0, c2) IFOR(k, 0, c1) c[i][j] = c[i][j] + a[i][k] * b[k][j]; return c; } double *vector_add(double *(&a), double *(&b), int row) { double *add = NULL; init_1D_mat(add, row); IFOR(i, 0, row) add[i] = a[i] + b[i]; return add; } double **add_2D_mat_1D_mat(double **a, double *b, int r, int c) { UFOR(i, 0, r) UFOR(j, 0, c) a[i][j] += b[j]; return a; } double **diff_2D_mat_1D_mat(double **a, double *b, int r, int c) { UFOR(i, 0, r) UFOR(j, 0, c) a[i][j] -= b[i]; return a; } double **scalar_add_2D_mat(double **mat, int scalar, int r, int c) { UFOR(i, 0, r) UFOR(j, 0, c) mat[i][j] += scalar; return mat; } double **scalar_divide_2D_mat(double **mat, double scalar, int r, int c) { UFOR(i, 0, r) UFOR(j, 0, c) mat[i][j] /= scalar; return mat; } double **mat_transpose(double **a, int r, int c) { double **trans; init_2D_mat(trans, c, r); IFOR(i, 0, r) IFOR(j, 0, c) trans[i][j] = a[j][i]; return trans; } double **scalar_multiply_2D_mat(double **mat, int scalar, int r, int c) { UFOR(i, 0, r) UFOR(j, 0, c) mat[i][j] *= scalar; return mat; } double *scalar_divide_1D_mat(double *mat, int scalar, int r) { UFOR(i, 0, r) mat[i] /= scalar; return mat; } double *scalar_multiply_1D_mat(double *mat, int scalar, int r) { UFOR(i, 0, r) { mat[i] *= scalar; } return mat; } double *sum_across_2nd_dim(double **a, int r, int c) { double *sum; init_1D_mat(sum, r); UFOR(i, 0, r) { sum[i] = 0; UFOR(j, 0, c) sum[i] += a[i][j]; } return sum; } double **element_wise_multiply(double **a, double **b, int r, int c) { UFOR(i, 0, r) UFOR(j, 0, c) a[i][j] *= b[i][j]; return a; } };
ab0464664e559a7a324f238b79bd11b6cf77dc0a.cu
#include <bits/stdc++.h> #include <cuda.h> #include <stdlib.h> #define IFOR(v, s, e) for(int v = s; v < e; ++v) #define UFOR(v, s, e) for(unsigned v = s; v < e; v++) using namespace std; class MatrixUtility { public: void print1Dmat(double *arr, int m) { IFOR(i, 0, m) cout << arr[i] << " "; cout << '\n'; } void print2Dmat(double **arr, int m, int n) { IFOR(i, 0, m) { IFOR(j, 0, n) printf("%0.9f ", arr[i][j]); cout << '\n'; } cout << '\n'; } void init_1D_mat(double *(&arr), int n) { arr = (double *)malloc(n * sizeof(double)); } void init_2D_mat(double **(&arr), int row, int col) { arr = (double **)malloc(row * sizeof(double *)); IFOR(i, 0, row) arr[i] = (double *)malloc(col * sizeof(double)); } double **mat_add(double **(&A), double **(&B), int row, int col) { double **res = NULL; init_2D_mat(res, row, col); IFOR(i, 0, row) IFOR(j, 0, col) res[i][j] = A[i][j] + B[i][j]; return res; } double **mat_multiply(double **(&a), double **(&b), int r1, int c1, int r2, int c2) { double **c = NULL; init_2D_mat(c, r1, c2); IFOR(i, 0, r1) IFOR(j, 0, c2) IFOR(k, 0, c1) c[i][j] = c[i][j] + a[i][k] * b[k][j]; return c; } double *vector_add(double *(&a), double *(&b), int row) { double *add = NULL; init_1D_mat(add, row); IFOR(i, 0, row) add[i] = a[i] + b[i]; return add; } double **add_2D_mat_1D_mat(double **a, double *b, int r, int c) { UFOR(i, 0, r) UFOR(j, 0, c) a[i][j] += b[j]; return a; } double **diff_2D_mat_1D_mat(double **a, double *b, int r, int c) { UFOR(i, 0, r) UFOR(j, 0, c) a[i][j] -= b[i]; return a; } double **scalar_add_2D_mat(double **mat, int scalar, int r, int c) { UFOR(i, 0, r) UFOR(j, 0, c) mat[i][j] += scalar; return mat; } double **scalar_divide_2D_mat(double **mat, double scalar, int r, int c) { UFOR(i, 0, r) UFOR(j, 0, c) mat[i][j] /= scalar; return mat; } double **mat_transpose(double **a, int r, int c) { double **trans; init_2D_mat(trans, c, r); IFOR(i, 0, r) IFOR(j, 0, c) trans[i][j] = a[j][i]; return trans; } double **scalar_multiply_2D_mat(double **mat, int scalar, int r, int c) { UFOR(i, 0, r) UFOR(j, 0, c) mat[i][j] *= scalar; return mat; } double *scalar_divide_1D_mat(double *mat, int scalar, int r) { UFOR(i, 0, r) mat[i] /= scalar; return mat; } double *scalar_multiply_1D_mat(double *mat, int scalar, int r) { UFOR(i, 0, r) { mat[i] *= scalar; } return mat; } double *sum_across_2nd_dim(double **a, int r, int c) { double *sum; init_1D_mat(sum, r); UFOR(i, 0, r) { sum[i] = 0; UFOR(j, 0, c) sum[i] += a[i][j]; } return sum; } double **element_wise_multiply(double **a, double **b, int r, int c) { UFOR(i, 0, r) UFOR(j, 0, c) a[i][j] *= b[i][j]; return a; } };
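In the MatrixUtility pair above, mat_multiply accumulates with c[i][j] = c[i][j] + a[i][k] * b[k][j] immediately after init_2D_mat, which allocates with malloc, so the accumulation starts from uninitialized memory. A minimal sketch of a fix, reusing the file's own IFOR macro (switching init_2D_mat to calloc would work as well):

double **c = NULL;
init_2D_mat(c, r1, c2);
IFOR(i, 0, r1) IFOR(j, 0, c2) c[i][j] = 0.0;                      // start from a defined value
IFOR(i, 0, r1) IFOR(j, 0, c2) IFOR(k, 0, c1) c[i][j] += a[i][k] * b[k][j];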
c39a07070b80892bba0c8c2adb01774d9c229b5f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "NvInfer.h" #include "common/bertCommon.h" #include "common/common.cuh" #include "common/serialize.hpp" #include "qkvToContextPlugin.h" #include <cassert> #include <cstring> #include <iostream> #include <tuple> #include <vector> #include "bertQKVToContextPlugin/fused_multihead_attention_v2/include/fused_multihead_attention_v2.h" using namespace nvinfer1; namespace nvinfer1 { namespace plugin { namespace bert { inline uint32_t asUInt32(float const& val) { return *reinterpret_cast<uint32_t const*>(reinterpret_cast<void const*>(&val)); } template <typename T, int TPB, int VPT> __global__ void maskedSoftmax(const float rsqrtHeadSize, const T* input, T* output, const int* maskIdx) { using BlockReduce = hipcub::BlockReduce<float, TPB>; union SMem { T shm[VPT * TPB]; typename BlockReduce::TempStorage reduce; SMem() {} }; __shared__ SMem tmp; // grid: (NxS, B) const int b = blockIdx.y; const int blockOffset = (b * gridDim.x + blockIdx.x) * TPB; __shared__ int lastValid; if (threadIdx.x == 0) { lastValid = min(TPB, maskIdx[b]); } __syncthreads(); float local[VPT]; __shared__ float rZ; __shared__ float fMax[VPT]; const int idx = (blockOffset + threadIdx.x) * VPT; T* myshm = &tmp.shm[threadIdx.x * VPT]; copy<sizeof(T) * VPT>(&input[idx], myshm); __syncthreads(); #pragma unroll for (int it = 0; it < VPT; it++) { local[it] = (threadIdx.x < lastValid) ? float(tmp.shm[it * TPB + threadIdx.x]) : -FLT_MAX; } __syncthreads(); #pragma unroll for (int it = 0; it < VPT; it++) { float maxElem = BlockReduce(tmp.reduce).Reduce(local[it], hipcub::Max()); if (threadIdx.x == 0) { fMax[it] = maxElem; } __syncthreads(); } #pragma unroll for (int it = 0; it < VPT; it++) { local[it] = (threadIdx.x < lastValid) ? myExp<float>(rsqrtHeadSize * (local[it] - fMax[it])) : 0.f; } __syncthreads(); #pragma unroll for (int it = 0; it < VPT; it++) { const auto Z = BlockReduce(tmp.reduce).Reduce(local[it], hipcub::Sum()); if (threadIdx.x == 0) { rZ = (1.f) / Z; } __syncthreads(); local[it] = (threadIdx.x < lastValid) ? 
local[it] * rZ : 0.F; } #pragma unroll for (int it = 0; it < VPT; it++) { tmp.shm[it * TPB + threadIdx.x] = local[it]; } __syncthreads(); copy<sizeof(T) * VPT>(myshm, &output[idx]); } template <typename T, int TPB, int VPT> __global__ void softmax(const float rsqrtHeadSize, const T* input, T* output) { float local[VPT]; using BlockReduce = hipcub::BlockReduce<float, TPB>; union SMem { T shm[VPT * TPB]; typename BlockReduce::TempStorage reduce; SMem() {} }; __shared__ SMem tmp; __shared__ float rZ; __shared__ float fMax[VPT]; const int idx = (TPB * blockIdx.x + threadIdx.x) * VPT; T* myshm = &tmp.shm[threadIdx.x * VPT]; copy<sizeof(T) * VPT>(&input[idx], myshm); __syncthreads(); #pragma unroll for (int it = 0; it < VPT; it++) { local[it] = float(tmp.shm[it * TPB + threadIdx.x]); } __syncthreads(); #pragma unroll for (int it = 0; it < VPT; it++) { float maxElem = BlockReduce(tmp.reduce).Reduce(local[it], hipcub::Max()); if (threadIdx.x == 0) { fMax[it] = maxElem; } __syncthreads(); } #pragma unroll for (int it = 0; it < VPT; it++) { local[it] = myExp<float>(rsqrtHeadSize * (local[it] - fMax[it])); } __syncthreads(); #pragma unroll for (int it = 0; it < VPT; it++) { const auto Z = BlockReduce(tmp.reduce).Reduce(local[it], hipcub::Sum()); if (threadIdx.x == 0) { rZ = 1.f / Z; } __syncthreads(); local[it] *= rZ; } #pragma unroll for (int it = 0; it < VPT; it++) { tmp.shm[it * TPB + threadIdx.x] = local[it]; } __syncthreads(); copy<sizeof(T) * VPT>(myshm, &output[idx]); } template <typename T, unsigned TPB> __global__ void scaledSoftmaxKernelSmall(const int ld, const float rsqrtHeadSize, const T* input, T* output) { scaledSoftmaxSmall<T, TPB>(ld, ld, rsqrtHeadSize, input, output); } template <typename T, unsigned TPB> __global__ void scaledSoftmaxKernel(const int ld, const float rsqrtHeadSize, const T* input, T* output) { scaledSoftmax<T, TPB>(ld, ld, rsqrtHeadSize, input, output); } template <typename T> int computeScaledSoftmax( hipStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize, const T* input, T* output) { constexpr int VPT = 16 / sizeof(T); const dim3 grid(ld * N, B, 1); if (ld <= 32) { const int blockSize = 32; hipLaunchKernelGGL(( scaledSoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, input, output); } else if (ld < 128) { const int blockSize = 128; hipLaunchKernelGGL(( scaledSoftmaxKernelSmall<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, input, output); } else if (ld == 128) { const int grid = B * N * ld / (VPT); hipLaunchKernelGGL(( softmax<T, 128, VPT>), dim3(grid), dim3(128), 0, stream, rsqrtHeadSize, input, output); } else if (ld == 384) { const int grid = B * N * ld / (VPT); hipLaunchKernelGGL(( softmax<T, 384, VPT>), dim3(grid), dim3(384), 0, stream, rsqrtHeadSize, input, output); } else { const int blockSize = 256; hipLaunchKernelGGL(( scaledSoftmaxKernel<T, blockSize>), dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, input, output); } PLUGIN_CHECK(hipPeekAtLastError()); return 0; } template <typename T, unsigned TPB> __global__ void maskedScaledSoftmaxKernelSmall( const int ld, const float rsqrtHeadSize, const int* maskIdx, const T* input, T* output) { __shared__ int lastValid; if (threadIdx.x == 0) { lastValid = min(ld, maskIdx[blockIdx.y]); } __syncthreads(); scaledSoftmaxSmall<T, TPB>(ld, lastValid, rsqrtHeadSize, input, output); } template <typename T, unsigned TPB> __global__ void maskedScaledSoftmaxKernel( const int ld, const float rsqrtHeadSize, const 
int* maskIdx, const T* input, T* output) { __shared__ int lastValid; if (threadIdx.x == 0) { lastValid = min(ld, maskIdx[blockIdx.y]); } __syncthreads(); scaledSoftmax<T, TPB>(ld, lastValid, rsqrtHeadSize, input, output); } template <typename T> int computeMaskedScaledSoftmax(hipStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize, const int* maskIdx, const T* input, T* output) { // Mask idx is of length B and assumes the valid region is contiguous starting // from the beginning of the sequence const dim3 grid(ld * N, B, 1); // for smaller problems, e.g. BERT base B=1, this is not optimal if (ld <= 32) { constexpr int blockSize = 32; hipLaunchKernelGGL(( maskedScaledSoftmaxKernelSmall<T, blockSize>) , dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, maskIdx, input, output); } else if (ld < 128) { constexpr int blockSize = 128; hipLaunchKernelGGL(( maskedScaledSoftmaxKernelSmall<T, blockSize>) , dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, maskIdx, input, output); } else if (ld == 128) { if (B == 1) { constexpr int VPT = 4 / sizeof(T); constexpr int blockSize = 128; const dim3 grid(ld * N / VPT, B, 1); hipLaunchKernelGGL(( maskedSoftmax<T, blockSize, VPT>), dim3(grid), dim3(blockSize), 0, stream, rsqrtHeadSize, input, output, maskIdx); } else { constexpr int VPT = 16 / sizeof(T); constexpr int blockSize = 128; const dim3 grid(ld * N / VPT, B, 1); hipLaunchKernelGGL(( maskedSoftmax<T, blockSize, VPT>), dim3(grid), dim3(blockSize), 0, stream, rsqrtHeadSize, input, output, maskIdx); } } else if (ld == 384) { if (B == 1) { constexpr int VPT = 4 / sizeof(T); constexpr int blockSize = 384; const dim3 grid(ld * N / VPT, B, 1); hipLaunchKernelGGL(( maskedSoftmax<T, blockSize, VPT>), dim3(grid), dim3(blockSize), 0, stream, rsqrtHeadSize, input, output, maskIdx); } else { constexpr int VPT = 16 / sizeof(T); constexpr int blockSize = 384; const dim3 grid(ld * N / VPT, B, 1); hipLaunchKernelGGL(( maskedSoftmax<T, blockSize, VPT>), dim3(grid), dim3(blockSize), 0, stream, rsqrtHeadSize, input, output, maskIdx); } } else { constexpr int blockSize = 256; hipLaunchKernelGGL(( maskedScaledSoftmaxKernel<T, blockSize>) , dim3(grid), dim3(blockSize), 0, stream, ld, rsqrtHeadSize, maskIdx, input, output); } PLUGIN_CHECK(hipPeekAtLastError()); return 0; } std::pair<int, int> tuneBatchedGemm( const int B, const int S, const int numHeads, const int headSize, const int smVersion) { const int nruns = 500; hipblasHandle_t cublas; PLUGIN_CUBLASASSERT(hipblasCreate(&cublas)); hipStream_t stream; PLUGIN_CUASSERT(hipStreamCreate(&stream)); hipEvent_t start, stop; PLUGIN_CUASSERT(hipEventCreate(&start)); PLUGIN_CUASSERT(hipEventCreate(&stop)); PLUGIN_CUBLASASSERT(hipblasSetStream(cublas, stream)); PLUGIN_CUBLASASSERT(cublasSetMathMode(cublas, CUBLAS_TENSOR_OP_MATH)); using T = half; const int omatSize = S * S; const int numMats = B * numHeads; const int ldQKV = 3 * B * numHeads * headSize; const int strideQKV = 3 * headSize; const int ldOut = B * numHeads * headSize; const int strideOut = headSize; const size_t inBytes = S * B * 3 * numHeads * headSize * sizeof(T); const size_t qkBytes = S * S * B * numHeads * sizeof(T); const size_t outBytes = S * B * numHeads * headSize * sizeof(T); T* input = nullptr; T* qkptr = nullptr; T* output = nullptr; PLUGIN_CUASSERT(hipMalloc(&input, inBytes)); PLUGIN_CUASSERT(hipMalloc(&qkptr, qkBytes)); PLUGIN_CUASSERT(hipMalloc(&output, outBytes)); PLUGIN_CUASSERT(hipMemset(input, 1, inBytes)); PLUGIN_CUASSERT(hipMemset(qkptr, 1, 
qkBytes)); // input: SxBx3xNxH const T* qptr = input; const T* kptr = qptr + headSize; const T* vptr = kptr + headSize; const int startAlgo = (int) CUBLAS_GEMM_DEFAULT_TENSOR_OP; const int endAlgo = (int) CUBLAS_GEMM_ALGO15_TENSOR_OP; int best1 = startAlgo; int best2 = startAlgo; float ms1 = 1000000; float ms2 = 1000000; PLUGIN_ASSERT(smVersion >= kSM_53); for (int a = startAlgo; a <= endAlgo; a++) { hipblasGemmAlgo_t algo = static_cast<hipblasGemmAlgo_t>(a); float ms1_, ms2_; // qkptr: BxNxSxS PLUGIN_CUASSERT(hipEventRecord(start, stream)); for (int r = 0; r < nruns; r++) { PLUGIN_CUBLASASSERT(hipblasGemmStridedBatchedEx<T>(cublas, HIPBLAS_OP_T, HIPBLAS_OP_N, S, S, headSize, T(1.f), kptr, ldQKV, strideQKV, qptr, ldQKV, strideQKV, T(0.f), qkptr, S, omatSize, numMats, algo)); } PLUGIN_CUASSERT(hipEventRecord(stop, stream)); PLUGIN_CUASSERT(hipStreamSynchronize(stream)); PLUGIN_CUASSERT(hipEventElapsedTime(&ms1_, start, stop)); if (ms1_ < ms1) { best1 = algo; ms1 = ms1_; } // pptr: BxNxSxS // output: SxBxNxH PLUGIN_CUASSERT(hipEventRecord(start, stream)); for (int r = 0; r < nruns; r++) { PLUGIN_CUBLASASSERT(hipblasGemmStridedBatchedEx<T>(cublas, HIPBLAS_OP_N, HIPBLAS_OP_N, headSize, S, S, 1.f, vptr, ldQKV, strideQKV, qkptr, S, omatSize, 0.f, output, ldOut, strideOut, numMats, algo)); } PLUGIN_CUASSERT(hipEventRecord(stop, stream)); PLUGIN_CUASSERT(hipStreamSynchronize(stream)); PLUGIN_CUASSERT(hipEventElapsedTime(&ms2_, start, stop)); if (ms2_ < ms2) { best2 = algo; ms2 = ms2_; } } PLUGIN_CUASSERT(hipFree(input)); PLUGIN_CUASSERT(hipFree(qkptr)); PLUGIN_CUASSERT(hipFree(output)); PLUGIN_CUASSERT(hipEventDestroy(start)); PLUGIN_CUASSERT(hipEventDestroy(stop)); PLUGIN_CUASSERT(hipStreamDestroy(stream)); PLUGIN_CUBLASASSERT(hipblasDestroy(cublas)); return std::make_pair(best1, best2); } template int computeScaledSoftmax<float>(hipStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize, const float* input, float* output); template int computeScaledSoftmax<half>(hipStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize, const half* input, half* output); template int computeMaskedScaledSoftmax<float>(hipStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize, const int* maskIdx, const float* input, float* output); template int computeMaskedScaledSoftmax<half>(hipStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize, const int* maskIdx, const half* input, half* output); size_t MHARunner::getSerializationSize() const noexcept { return sizeof(mS) + sizeof(mB); } void MHARunner::serialize(void* buffer) const noexcept { serialize_value(&buffer, mS); serialize_value(&buffer, mB); } void MHARunner::deserialize(const void* data, size_t length) { deserialize_value(&data, &length, &mS); deserialize_value(&data, &length, &mB); setup(mS, mB); } UnfusedMHARunner::UnfusedMHARunner(const nvinfer1::DataType type, const int numHeads, const int headSize, const int sm) : MHARunner(type, numHeads, headSize) , mIsBestAlgoFound(false) , mAlgoBatchedEx1(CUBLAS_GEMM_DEFAULT_TENSOR_OP) , mAlgoBatchedEx2(CUBLAS_GEMM_DEFAULT_TENSOR_OP) , mSm(sm) { PLUGIN_CUBLASASSERT(hipblasCreate(&mCublas)); } UnfusedMHARunner::~UnfusedMHARunner() { PLUGIN_CUBLASASSERT(hipblasDestroy(mCublas)); } size_t UnfusedMHARunner::getSerializationSize() const noexcept { return sizeof(mAlgoBatchedEx1) + sizeof(mAlgoBatchedEx2) + MHARunner::getSerializationSize(); } void UnfusedMHARunner::serialize(void* buffer) const noexcept { 
serialize_value(&buffer, mAlgoBatchedEx1); serialize_value(&buffer, mAlgoBatchedEx2); MHARunner::serialize(buffer); } void UnfusedMHARunner::deserialize(const void* data, size_t length) { mIsBestAlgoFound = true; deserialize_value(&data, &length, &mAlgoBatchedEx1); deserialize_value(&data, &length, &mAlgoBatchedEx2); MHARunner::deserialize(data, length); } void UnfusedMHARunner::setup(const int S, const int B) { MHARunner::setup(S, B); if (mType == DataType::kHALF && !mIsBestAlgoFound) { std::tie(mAlgoBatchedEx1, mAlgoBatchedEx2) = tuneBatchedGemm(B, S, mNumHeads, mHeadSize, mSm); mIsBestAlgoFound = true; BERT_DEBUG_VALUE("QKV Plugin - Selected Algo 1 for batch gemms: ", mAlgoBatchedEx1); BERT_DEBUG_VALUE("QKV Plugin - Selected Algo 2 for batch gemms: ", mAlgoBatchedEx2); } } size_t UnfusedMHARunner::getWorkspaceSize() const { return 2UL * mWordSize * mOmatSize * mNumMats; } void UnfusedMHARunner::run(const PluginTensorDesc* inputDesc, const PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) { this->run(inputDesc[0], outputDesc[0], inputs[0], inputs[1], outputs[0], workspace, stream); } void UnfusedMHARunner::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, void* output, void* workspace, hipStream_t stream) { const int* maskIdx = static_cast<const int*>(maskPtr); PLUGIN_CUBLASASSERT(hipblasSetStream(mCublas, stream)); // Q, K, V: BxNxSxH (inputs) // Q * K': BxNxSxS (-> scratch1) // P: BxNxSxS (-> scratch2) // P * V: BxNxSxH (output) if (mType == DataType::kHALF) { CublasConfigHelper helper(mCublas); const half* qptr = static_cast<const half*>(qkvPtr); const half* kptr = qptr + mHeadSize; const half* vptr = kptr + mHeadSize; half* qkptr = static_cast<half*>(workspace); half* pptr = qkptr + mOmatSize * mNumMats; half alpha = 1.f; half beta = 0.f; PLUGIN_CUBLASASSERT(::hipblasGemmStridedBatchedEx(mCublas, HIPBLAS_OP_T, HIPBLAS_OP_N, mS, mS, mHeadSize, &alpha, kptr, HIP_R_16F, mLdQKV, mStrideQKV, qptr, HIP_R_16F, mLdQKV, mStrideQKV, &beta, qkptr, HIP_R_16F, mS, mOmatSize, mNumMats, HIP_R_16F, static_cast<hipblasGemmAlgo_t>(mAlgoBatchedEx1))); // apply softmax if (maskIdx) { // if we have a mask computeMaskedScaledSoftmax<half>(stream, mS, mB, mNumHeads, mRsqrtHeadSize, maskIdx, qkptr, pptr); } else { // if we don't have a mask computeScaledSoftmax<half>(stream, mS, mB, mNumHeads, mRsqrtHeadSize, qkptr, pptr); } // compute P*V (as V*P) PLUGIN_CUBLASASSERT(hipblasGemmStridedBatchedEx(mCublas, HIPBLAS_OP_N, HIPBLAS_OP_N, mHeadSize, mS, mS, &alpha, vptr, HIP_R_16F, mLdQKV, mStrideQKV, pptr, HIP_R_16F, mS, mOmatSize, &beta, output, HIP_R_16F, mLdOut, mStrideOut, mNumMats, HIP_R_16F, static_cast<hipblasGemmAlgo_t>(mAlgoBatchedEx2))); } else { const float* qptr = static_cast<const float*>(qkvPtr); const float* kptr = qptr + mHeadSize; const float* vptr = kptr + mHeadSize; float* qkptr = static_cast<float*>(workspace); float* pptr = qkptr + mOmatSize * mNumMats; float* outptr = static_cast<float*>(output); PLUGIN_CUBLASASSERT(cublasGemmStridedBatched<float>(mCublas, HIPBLAS_OP_T, HIPBLAS_OP_N, mS, mS, mHeadSize, 1.f, kptr, mLdQKV, mStrideQKV, qptr, mLdQKV, mStrideQKV, 0.f, qkptr, mS, mOmatSize, mNumMats)); // apply softmax if (maskIdx) { // if we have a mask computeMaskedScaledSoftmax<float>(stream, mS, mB, mNumHeads, mRsqrtHeadSize, maskIdx, qkptr, pptr); } else { // if we don't have a mask computeScaledSoftmax<float>(stream, mS, mB, mNumHeads, mRsqrtHeadSize, qkptr, 
pptr); } PLUGIN_CUBLASASSERT(cublasGemmStridedBatched<float>(mCublas, HIPBLAS_OP_N, HIPBLAS_OP_N, mHeadSize, mS, mS, 1.f, vptr, mLdQKV, mStrideQKV, pptr, mS, mOmatSize, 0.f, outptr, mLdOut, mStrideOut, mNumMats)); } } bool UnfusedMHARunner::isValid(int s) const { return mType != DataType::kINT8; } static inline void set_alpha(uint32_t& alpha, float norm, Data_type dtype) { if (dtype == DATA_TYPE_FP16) { half2 h2 = __float2half2_rn(norm); alpha = reinterpret_cast<const uint32_t&>(h2); } else if (dtype == DATA_TYPE_FP32) { alpha = reinterpret_cast<const uint32_t&>(norm); } else if (dtype == DATA_TYPE_INT32) { int32_t inorm = static_cast<int32_t>(norm); alpha = reinterpret_cast<const uint32_t&>(inorm); } else { assert(false); } } class FusedMHARunnerFP16::mhaImpl { public: mhaImpl(FusedMHARunnerFP16* interface) : interface(interface) , sm(interface->mSm) , xmmaKernel(getXMMAKernels(DATA_TYPE_FP16, sm)) , xmmas_m(0U) , xmmas_n(0U) , threads_per_cta(1U) { } ~mhaImpl() {} size_t getPackedMaskSizeInBytes() const { // check that we initialized assert(xmmas_m > 0); assert(threads_per_cta > 0); assert(interface->mB > 0); return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t); } void setup(const int S, const int B) { // TODO these implementation details might be better centralized into the XMMA code, since they are needed in // several places (also outside of this plugin) size_t warps_m{1U}; size_t warps_n{1U}; size_t warps_k{1U}; if (S == 64 || S == 96 || S == 128) { warps_m = 2; warps_n = 2; } else if (S == 384) { warps_m = 1; warps_n = 8; } else { assert(false && "Unsupporte seqlen"); } // The number of threads per CTA. threads_per_cta = warps_m * warps_n * warps_k * 32; // The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension. xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m); // The number of xmmas in the N dimension. 
xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n); const float scale_bmm1 = interface->mRsqrtHeadSize; const float scale_softmax = 1.f; // Seems to be only required for int8 const float scale_bmm2 = 1.f; Data_type scale_type = DATA_TYPE_FP16; set_alpha(params.scale_bmm1, scale_bmm1, scale_type); set_alpha(params.scale_softmax, scale_softmax, scale_type); set_alpha(params.scale_bmm2, scale_bmm2, scale_type); params.b = B; params.h = interface->mNumHeads; params.s = S; params.d = interface->mHeadSize; params.qkv_stride_in_bytes = get_size_in_bytes(interface->mLdQKV, DATA_TYPE_FP16); params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t); params.o_stride_in_bytes = get_size_in_bytes(interface->mLdOut, DATA_TYPE_FP16); } void run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, void* output, void* workspace, hipStream_t stream) { params.qkv_ptr = const_cast<void*>(qkvPtr); params.packed_mask_ptr = const_cast<void*>(maskPtr); params.o_ptr = output; xmmaKernel->run(params, stream); PLUGIN_CHECK(hipPeekAtLastError()); } bool isValid(int s) const { return xmmaKernel->isValid(s); } private: FusedMHARunnerFP16* interface; Fused_multihead_attention_params params; int sm; const FusedMultiHeadAttentionXMMAKernel* xmmaKernel; size_t xmmas_m; size_t xmmas_n; size_t threads_per_cta; }; FusedMHARunnerFP16::FusedMHARunnerFP16(const int numHeads, const int headSize, const int sm) : MHARunner(DataType::kHALF, numHeads, headSize) , mSm(sm) , pimpl(new mhaImpl(this)) { } void FusedMHARunnerFP16::setup(const int S, const int B) { MHARunner::setup(S, B); pimpl->setup(S, B); } size_t FusedMHARunnerFP16::getWorkspaceSize() const { return 0; } void FusedMHARunnerFP16::deserialize(const void* data, size_t length) { MHARunner::deserialize(data, length); setup(mS, mB); } void FusedMHARunnerFP16::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, void* output, void* workspace, hipStream_t stream) { pimpl->run(inputDesc, outputDesc, qkvPtr, maskPtr, output, workspace, stream); } void FusedMHARunnerFP16::run(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) { assert(false && "not implemented"); } bool FusedMHARunnerFP16::isValid(int s) const { return pimpl->isValid(s); } // Int8 starts here: TODO refactor the duplicate stuff class FusedMHARunnerInt8::mhaImpl { public: mhaImpl(FusedMHARunnerInt8* interface) : interface(interface) , sm(interface->mSm) , xmmaKernel(getXMMAKernels(DATA_TYPE_INT8, sm)) , mDqProbs(interface->mDqProbs) , xmmas_m(0U) , xmmas_n(0U) , threads_per_cta(1U) { } ~mhaImpl() {} size_t getPackedMaskSizeInBytes() const { assert(xmmas_m > 0); assert(threads_per_cta > 0); assert(interface->mB > 0); return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t); } void setup(const int S, const int B) { size_t warps_m{1U}; size_t warps_n{1U}; size_t warps_k{1U}; if (S == 128) { warps_m = 2; warps_n = 2; } else if (S == 384) { warps_m = 1; warps_n = 8; } else { assert(false && "Unsupporte seqlen"); } // The number of threads per CTA. threads_per_cta = warps_m * warps_n * warps_k * 32; // The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension. xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m); // The number of xmmas in the N dimension. 
xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n); params.b = B; params.h = interface->mNumHeads; params.s = S; params.d = interface->mHeadSize; params.qkv_stride_in_bytes = get_size_in_bytes(interface->mLdQKV, DATA_TYPE_INT8); params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t); params.o_stride_in_bytes = get_size_in_bytes(interface->mLdOut, DATA_TYPE_INT8); } void run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, void* output, void* workspace, hipStream_t stream) { float scaleQkv = inputDesc.scale; float scaleCtx = outputDesc.scale; float scaleBmm1 = scaleQkv * scaleQkv * interface->mRsqrtHeadSize; float scaleBmm2 = mDqProbs * scaleQkv / scaleCtx; float scaleSoftmax = 1.f / mDqProbs; params.scale_bmm1 = asUInt32(scaleBmm1); params.scale_bmm2 = asUInt32(scaleBmm2); params.scale_softmax = asUInt32(scaleSoftmax); params.enable_i2f_trick = -double(1 << 22) * double(scaleBmm2) <= -128.f && double(1 << 22) * double(scaleBmm2) >= 127.f; params.qkv_ptr = const_cast<void*>(qkvPtr); params.packed_mask_ptr = const_cast<void*>(maskPtr); params.o_ptr = output; xmmaKernel->run(params, stream); PLUGIN_CHECK(hipPeekAtLastError()); } bool isValid(int s) const { return xmmaKernel->isValid(s); } private: float mDqProbs; FusedMHARunnerInt8* interface; Fused_multihead_attention_params params; int sm; const FusedMultiHeadAttentionXMMAKernel* xmmaKernel; size_t xmmas_m; size_t xmmas_n; size_t threads_per_cta; }; FusedMHARunnerInt8::FusedMHARunnerInt8(const int numHeads, const int headSize, const int sm, const float dqProbs) : MHARunner(DataType::kINT8, numHeads, headSize) , mSm(sm) , pimpl(new mhaImpl(this)) , mDqProbs(dqProbs) { } void FusedMHARunnerInt8::setup(const int S, const int B) { MHARunner::setup(S, B); pimpl->setup(S, B); } size_t FusedMHARunnerInt8::getWorkspaceSize() const { return 0; } void FusedMHARunnerInt8::deserialize(const void* data, size_t length) { MHARunner::deserialize(data, length); setup(mS, mB); } void FusedMHARunnerInt8::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, void* output, void* workspace, hipStream_t stream) { pimpl->run(inputDesc, outputDesc, qkvPtr, maskPtr, output, workspace, stream); } void FusedMHARunnerInt8::run(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) { assert(false && "not implemented"); } bool FusedMHARunnerInt8::isValid(int s) const { return pimpl->isValid(s); } class FusedMHARunnerFP16v2::mhaImpl { public: mhaImpl(FusedMHARunnerFP16v2* interface) : interface(interface) , sm(interface->mSm) , xmmaKernel(getXMMAKernelsV2(DATA_TYPE_FP16, sm)) { assert((sm == kSM_72 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_87 || sm == kSM_89 || sm == kSM_90) && "Unsupported architecture"); params.clear(); } ~mhaImpl() {} size_t getPackedMaskSizeInBytes() const { // check that we initialized assert(xmmas_m > 0); assert(threads_per_cta > 0); assert(interface->mB > 0); return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t); } void setup(const int S, const int B) { // TODO these implementation details might be better centralized into the XMMA code, since they are needed in // several places (also outside of this plugin) size_t warps_m{1U}; size_t warps_n{1U}; size_t warps_k{1U}; // [MLPINF-1894] HGMMA has a different warp group. 
// TODO: add S==64/96/512 HGMMA support for sm==90 if (sm == kSM_90 && (S == 128 || S == 256 || S == 384)) { warps_m = 4; warps_n = 1; } else { if (S == 64 || S == 96 || S == 128) { warps_m = 2; warps_n = 2; } else if (S == 256 || S == 192) { warps_m = 1; warps_n = 4; } else if (S == 384 || S == 512) { warps_m = 1; warps_n = 8; } else { assert(false && "Unsupporte seqlen"); } } // The number of threads per CTA. threads_per_cta = warps_m * warps_n * warps_k * 32; // The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension. xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m); // The number of xmmas in the N dimension. xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n); const float scale_bmm1 = interface->mRsqrtHeadSize; const float scale_softmax = 1.f; // Seems to be only required for int8 const float scale_bmm2 = 1.f; Data_type scale_type = DATA_TYPE_FP16; set_alpha(params.scale_bmm1, scale_bmm1, scale_type); set_alpha(params.scale_softmax, scale_softmax, scale_type); set_alpha(params.scale_bmm2, scale_bmm2, scale_type); params.b = B; params.h = interface->mNumHeads; params.s = S; params.d = interface->mHeadSize; // mLdQKV = 3 * B * mNumHeads * mHeadSize; // mLdOut = B * mNumHeads * mHeadSize; params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half); params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t); params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half); } void run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, const void* cuSeqlenPtr, void* output, void* workspace, hipStream_t stream) { params.qkv_ptr = const_cast<void*>(qkvPtr); // dummy input in V2/V3 because now we use cu_seqlens params.packed_mask_ptr = nullptr; params.o_ptr = output; params.cu_seqlens = static_cast<int*>(const_cast<void*>(cuSeqlenPtr)); xmmaKernel->run(params, stream); PLUGIN_CHECK(hipPeekAtLastError()); } bool isValid(int s) const { return xmmaKernel->isValid(s); } private: FusedMHARunnerFP16v2* interface; Fused_multihead_attention_params_v2 params; int sm; const FusedMultiHeadAttentionXMMAKernelV2* xmmaKernel; size_t xmmas_m; size_t xmmas_n; size_t threads_per_cta; }; FusedMHARunnerFP16v2::FusedMHARunnerFP16v2(const int numHeads, const int headSize, const int sm) : MHARunner(DataType::kHALF, numHeads, headSize) , mSm(sm) , pimpl(new mhaImpl(this)) { } void FusedMHARunnerFP16v2::setup(const int S, const int B) { MHARunner::setup(S, B); pimpl->setup(S, B); } size_t FusedMHARunnerFP16v2::getWorkspaceSize() const { return 0; } void FusedMHARunnerFP16v2::deserialize(const void* data, size_t length) { MHARunner::deserialize(data, length); setup(mS, mB); } void FusedMHARunnerFP16v2::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, void* output, void* workspace, hipStream_t stream) { assert(false && "not implemented"); // pimpl->run(inputDesc, outputDesc, qkvPtr, maskPtr, output, workspace, stream); } void FusedMHARunnerFP16v2::run(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) { pimpl->run(inputDesc[0], outputDesc[0], inputs[0], inputs[1], inputs[2], outputs[0], workspace, stream); } bool FusedMHARunnerFP16v2::isValid(int s) const { return pimpl->isValid(s); } // Int8 starts here: TODO refactor the duplicate stuff class 
FusedMHARunnerInt8v2::mhaImpl { public: mhaImpl(FusedMHARunnerInt8v2* interface) : interface(interface) , sm(interface->mSm) , xmmaKernel(getXMMAKernelsV2(DATA_TYPE_INT8, sm)) , mDqProbs(interface->mDqProbs) , xmmas_m(0U) , xmmas_n(0U) , threads_per_cta(1U) { assert((sm == kSM_72 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_87 || sm == kSM_89 || sm == kSM_90) && "Unsupported architecture"); params.clear(); } ~mhaImpl() {} size_t getPackedMaskSizeInBytes() const { assert(xmmas_m > 0); assert(threads_per_cta > 0); assert(interface->mB > 0); return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t); } void setup(const int S, const int B) { size_t warps_m{1U}; size_t warps_n{1U}; size_t warps_k{1U}; // [MLPINF-1894] IGMMA has a different warp group. // TODO: add S==64/96 IGMMA support for sm==90 if (sm == kSM_90 && (S == 128 || S == 192 || S == 256 || S == 384 || S == 512)) { if (S == 512) { warps_m = 4; warps_n = 2; } else { warps_m = 4; warps_n = 1; } } else { if (S == 128) { warps_m = 2; warps_n = 2; } else if (S == 256 || S == 192) { warps_m = 1; warps_n = 4; } else if (S == 384 || S == 512) { warps_m = 1; warps_n = 8; } else { assert(false && "Unsupported seqlen."); } } // The number of threads per CTA. threads_per_cta = warps_m * warps_n * warps_k * 32; // The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension. xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m); // The number of xmmas in the N dimension. xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n); params.b = B; params.h = interface->mNumHeads; params.s = S; params.d = interface->mHeadSize; params.use_int8_scale_max = interface->mUseInt8ScaleMax; params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t); params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(int8_t); params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(int8_t); } void run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, const void* cuSeqlenPtr, void* output, void* workspace, hipStream_t stream) { float scaleQkv = inputDesc.scale; float scaleCtx = outputDesc.scale; float scaleBmm1 = scaleQkv * scaleQkv * interface->mRsqrtHeadSize; float scaleBmm2 = mDqProbs * scaleQkv / scaleCtx; float scaleSoftmax = 1.f / mDqProbs; params.scale_bmm1 = asUInt32(scaleBmm1); params.scale_bmm2 = asUInt32(scaleBmm2); params.scale_softmax = asUInt32(scaleSoftmax); params.enable_i2f_trick = -double(1 << 22) * double(scaleBmm2) <= -128.f && double(1 << 22) * double(scaleBmm2) >= 127.f; params.qkv_ptr = const_cast<void*>(qkvPtr); // dummy input in V2/V3 because now we use cu_seqlens params.packed_mask_ptr = nullptr; params.use_int8_scale_max = interface->mUseInt8ScaleMax; params.o_ptr = output; params.cu_seqlens = static_cast<int*>(const_cast<void*>(cuSeqlenPtr)); xmmaKernel->run(params, stream); PLUGIN_CHECK(hipPeekAtLastError()); } bool isValid(int s) const { return xmmaKernel->isValid(s); } private: float mDqProbs; FusedMHARunnerInt8v2* interface; Fused_multihead_attention_params_v2 params; int sm; const FusedMultiHeadAttentionXMMAKernelV2* xmmaKernel; size_t xmmas_m; size_t xmmas_n; size_t threads_per_cta; }; FusedMHARunnerInt8v2::FusedMHARunnerInt8v2(const int numHeads, const int headSize, const int sm, const float dqProbs, bool const useInt8ScaleMax) : MHARunner(DataType::kINT8, numHeads, headSize) , mSm(sm) , pimpl(new mhaImpl(this)) , mDqProbs(dqProbs) , 
mUseInt8ScaleMax(useInt8ScaleMax) { } void FusedMHARunnerInt8v2::setup(const int S, const int B) { MHARunner::setup(S, B); pimpl->setup(S, B); } size_t FusedMHARunnerInt8v2::getWorkspaceSize() const { return 0; } void FusedMHARunnerInt8v2::deserialize(const void* data, size_t length) { MHARunner::deserialize(data, length); setup(mS, mB); } void FusedMHARunnerInt8v2::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, void* output, void* workspace, hipStream_t stream) { assert(false && "Not implemented"); } void FusedMHARunnerInt8v2::run(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) { pimpl->run(inputDesc[0], outputDesc[0], inputs[0], inputs[1], inputs[2], outputs[0], workspace, stream); } bool FusedMHARunnerInt8v2::isValid(int s) const { return pimpl->isValid(s); } } // namespace bert } // namespace plugin } // namespace nvinfer1
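The FP16 v2 setup above derives all of its tile bookkeeping from the sequence length S and the chosen warp layout. Below is a minimal host-side sketch of that sizing arithmetic, assuming the pre-Hopper fp16 tiling used above; MhaTileInfo and computeTileInfo are illustrative names, not part of the plugin API.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Illustrative only: mirrors the sizing math in mhaImpl::setup() and
// getPackedMaskSizeInBytes() for the fp16 v2 path on pre-Hopper SMs.
struct MhaTileInfo
{
    size_t threadsPerCta;   // warps_m * warps_n * warps_k * 32
    size_t xmmasM;          // ceil(S / (16 * warps_m)); one uint32_t of packed mask per XMMA row
    size_t packedMaskBytes; // B * xmmasM * threadsPerCta * sizeof(uint32_t)
};

static MhaTileInfo computeTileInfo(int S, int B)
{
    size_t warpsM = 1, warpsN = 1, warpsK = 1;
    if (S == 64 || S == 96 || S == 128) { warpsM = 2; warpsN = 2; }
    else if (S == 192 || S == 256)      { warpsM = 1; warpsN = 4; }
    else if (S == 384 || S == 512)      { warpsM = 1; warpsN = 8; }
    else { assert(false && "Unsupported seqlen"); }

    MhaTileInfo info{};
    info.threadsPerCta = warpsM * warpsN * warpsK * 32;
    info.xmmasM = (S + 16 * warpsM - 1) / (16 * warpsM);
    info.packedMaskBytes = static_cast<size_t>(B) * info.xmmasM * info.threadsPerCta * sizeof(uint32_t);
    return info;
}

// Example: S = 384, B = 8 gives 256 threads/CTA, 24 XMMAs in M,
// and 8 * 24 * 256 * 4 bytes of packed mask.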
c39a07070b80892bba0c8c2adb01774d9c229b5f.cu
/* * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "NvInfer.h" #include "common/bertCommon.h" #include "common/common.cuh" #include "common/serialize.hpp" #include "qkvToContextPlugin.h" #include <cassert> #include <cstring> #include <iostream> #include <tuple> #include <vector> #include "bertQKVToContextPlugin/fused_multihead_attention_v2/include/fused_multihead_attention_v2.h" using namespace nvinfer1; namespace nvinfer1 { namespace plugin { namespace bert { inline uint32_t asUInt32(float const& val) { return *reinterpret_cast<uint32_t const*>(reinterpret_cast<void const*>(&val)); } template <typename T, int TPB, int VPT> __global__ void maskedSoftmax(const float rsqrtHeadSize, const T* input, T* output, const int* maskIdx) { using BlockReduce = cub::BlockReduce<float, TPB>; union SMem { T shm[VPT * TPB]; typename BlockReduce::TempStorage reduce; SMem() {} }; __shared__ SMem tmp; // grid: (NxS, B) const int b = blockIdx.y; const int blockOffset = (b * gridDim.x + blockIdx.x) * TPB; __shared__ int lastValid; if (threadIdx.x == 0) { lastValid = min(TPB, maskIdx[b]); } __syncthreads(); float local[VPT]; __shared__ float rZ; __shared__ float fMax[VPT]; const int idx = (blockOffset + threadIdx.x) * VPT; T* myshm = &tmp.shm[threadIdx.x * VPT]; copy<sizeof(T) * VPT>(&input[idx], myshm); __syncthreads(); #pragma unroll for (int it = 0; it < VPT; it++) { local[it] = (threadIdx.x < lastValid) ? float(tmp.shm[it * TPB + threadIdx.x]) : -FLT_MAX; } __syncthreads(); #pragma unroll for (int it = 0; it < VPT; it++) { float maxElem = BlockReduce(tmp.reduce).Reduce(local[it], cub::Max()); if (threadIdx.x == 0) { fMax[it] = maxElem; } __syncthreads(); } #pragma unroll for (int it = 0; it < VPT; it++) { local[it] = (threadIdx.x < lastValid) ? myExp<float>(rsqrtHeadSize * (local[it] - fMax[it])) : 0.f; } __syncthreads(); #pragma unroll for (int it = 0; it < VPT; it++) { const auto Z = BlockReduce(tmp.reduce).Reduce(local[it], cub::Sum()); if (threadIdx.x == 0) { rZ = (1.f) / Z; } __syncthreads(); local[it] = (threadIdx.x < lastValid) ? 
local[it] * rZ : 0.F; } #pragma unroll for (int it = 0; it < VPT; it++) { tmp.shm[it * TPB + threadIdx.x] = local[it]; } __syncthreads(); copy<sizeof(T) * VPT>(myshm, &output[idx]); } template <typename T, int TPB, int VPT> __global__ void softmax(const float rsqrtHeadSize, const T* input, T* output) { float local[VPT]; using BlockReduce = cub::BlockReduce<float, TPB>; union SMem { T shm[VPT * TPB]; typename BlockReduce::TempStorage reduce; SMem() {} }; __shared__ SMem tmp; __shared__ float rZ; __shared__ float fMax[VPT]; const int idx = (TPB * blockIdx.x + threadIdx.x) * VPT; T* myshm = &tmp.shm[threadIdx.x * VPT]; copy<sizeof(T) * VPT>(&input[idx], myshm); __syncthreads(); #pragma unroll for (int it = 0; it < VPT; it++) { local[it] = float(tmp.shm[it * TPB + threadIdx.x]); } __syncthreads(); #pragma unroll for (int it = 0; it < VPT; it++) { float maxElem = BlockReduce(tmp.reduce).Reduce(local[it], cub::Max()); if (threadIdx.x == 0) { fMax[it] = maxElem; } __syncthreads(); } #pragma unroll for (int it = 0; it < VPT; it++) { local[it] = myExp<float>(rsqrtHeadSize * (local[it] - fMax[it])); } __syncthreads(); #pragma unroll for (int it = 0; it < VPT; it++) { const auto Z = BlockReduce(tmp.reduce).Reduce(local[it], cub::Sum()); if (threadIdx.x == 0) { rZ = 1.f / Z; } __syncthreads(); local[it] *= rZ; } #pragma unroll for (int it = 0; it < VPT; it++) { tmp.shm[it * TPB + threadIdx.x] = local[it]; } __syncthreads(); copy<sizeof(T) * VPT>(myshm, &output[idx]); } template <typename T, unsigned TPB> __global__ void scaledSoftmaxKernelSmall(const int ld, const float rsqrtHeadSize, const T* input, T* output) { scaledSoftmaxSmall<T, TPB>(ld, ld, rsqrtHeadSize, input, output); } template <typename T, unsigned TPB> __global__ void scaledSoftmaxKernel(const int ld, const float rsqrtHeadSize, const T* input, T* output) { scaledSoftmax<T, TPB>(ld, ld, rsqrtHeadSize, input, output); } template <typename T> int computeScaledSoftmax( cudaStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize, const T* input, T* output) { constexpr int VPT = 16 / sizeof(T); const dim3 grid(ld * N, B, 1); if (ld <= 32) { const int blockSize = 32; scaledSoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, input, output); } else if (ld < 128) { const int blockSize = 128; scaledSoftmaxKernelSmall<T, blockSize><<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, input, output); } else if (ld == 128) { const int grid = B * N * ld / (VPT); softmax<T, 128, VPT><<<grid, 128, 0, stream>>>(rsqrtHeadSize, input, output); } else if (ld == 384) { const int grid = B * N * ld / (VPT); softmax<T, 384, VPT><<<grid, 384, 0, stream>>>(rsqrtHeadSize, input, output); } else { const int blockSize = 256; scaledSoftmaxKernel<T, blockSize><<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, input, output); } PLUGIN_CHECK(cudaPeekAtLastError()); return 0; } template <typename T, unsigned TPB> __global__ void maskedScaledSoftmaxKernelSmall( const int ld, const float rsqrtHeadSize, const int* maskIdx, const T* input, T* output) { __shared__ int lastValid; if (threadIdx.x == 0) { lastValid = min(ld, maskIdx[blockIdx.y]); } __syncthreads(); scaledSoftmaxSmall<T, TPB>(ld, lastValid, rsqrtHeadSize, input, output); } template <typename T, unsigned TPB> __global__ void maskedScaledSoftmaxKernel( const int ld, const float rsqrtHeadSize, const int* maskIdx, const T* input, T* output) { __shared__ int lastValid; if (threadIdx.x == 0) { lastValid = min(ld, maskIdx[blockIdx.y]); } __syncthreads(); 
scaledSoftmax<T, TPB>(ld, lastValid, rsqrtHeadSize, input, output); } template <typename T> int computeMaskedScaledSoftmax(cudaStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize, const int* maskIdx, const T* input, T* output) { // Mask idx is of length B and assumes the valid region is contiguous starting // from the beginning of the sequence const dim3 grid(ld * N, B, 1); // for smaller problems, e.g. BERT base B=1, this is not optimal if (ld <= 32) { constexpr int blockSize = 32; maskedScaledSoftmaxKernelSmall<T, blockSize> <<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, maskIdx, input, output); } else if (ld < 128) { constexpr int blockSize = 128; maskedScaledSoftmaxKernelSmall<T, blockSize> <<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, maskIdx, input, output); } else if (ld == 128) { if (B == 1) { constexpr int VPT = 4 / sizeof(T); constexpr int blockSize = 128; const dim3 grid(ld * N / VPT, B, 1); maskedSoftmax<T, blockSize, VPT><<<grid, blockSize, 0, stream>>>(rsqrtHeadSize, input, output, maskIdx); } else { constexpr int VPT = 16 / sizeof(T); constexpr int blockSize = 128; const dim3 grid(ld * N / VPT, B, 1); maskedSoftmax<T, blockSize, VPT><<<grid, blockSize, 0, stream>>>(rsqrtHeadSize, input, output, maskIdx); } } else if (ld == 384) { if (B == 1) { constexpr int VPT = 4 / sizeof(T); constexpr int blockSize = 384; const dim3 grid(ld * N / VPT, B, 1); maskedSoftmax<T, blockSize, VPT><<<grid, blockSize, 0, stream>>>(rsqrtHeadSize, input, output, maskIdx); } else { constexpr int VPT = 16 / sizeof(T); constexpr int blockSize = 384; const dim3 grid(ld * N / VPT, B, 1); maskedSoftmax<T, blockSize, VPT><<<grid, blockSize, 0, stream>>>(rsqrtHeadSize, input, output, maskIdx); } } else { constexpr int blockSize = 256; maskedScaledSoftmaxKernel<T, blockSize> <<<grid, blockSize, 0, stream>>>(ld, rsqrtHeadSize, maskIdx, input, output); } PLUGIN_CHECK(cudaPeekAtLastError()); return 0; } std::pair<int, int> tuneBatchedGemm( const int B, const int S, const int numHeads, const int headSize, const int smVersion) { const int nruns = 500; cublasHandle_t cublas; PLUGIN_CUBLASASSERT(cublasCreate(&cublas)); cudaStream_t stream; PLUGIN_CUASSERT(cudaStreamCreate(&stream)); cudaEvent_t start, stop; PLUGIN_CUASSERT(cudaEventCreate(&start)); PLUGIN_CUASSERT(cudaEventCreate(&stop)); PLUGIN_CUBLASASSERT(cublasSetStream(cublas, stream)); PLUGIN_CUBLASASSERT(cublasSetMathMode(cublas, CUBLAS_TENSOR_OP_MATH)); using T = half; const int omatSize = S * S; const int numMats = B * numHeads; const int ldQKV = 3 * B * numHeads * headSize; const int strideQKV = 3 * headSize; const int ldOut = B * numHeads * headSize; const int strideOut = headSize; const size_t inBytes = S * B * 3 * numHeads * headSize * sizeof(T); const size_t qkBytes = S * S * B * numHeads * sizeof(T); const size_t outBytes = S * B * numHeads * headSize * sizeof(T); T* input = nullptr; T* qkptr = nullptr; T* output = nullptr; PLUGIN_CUASSERT(cudaMalloc(&input, inBytes)); PLUGIN_CUASSERT(cudaMalloc(&qkptr, qkBytes)); PLUGIN_CUASSERT(cudaMalloc(&output, outBytes)); PLUGIN_CUASSERT(cudaMemset(input, 1, inBytes)); PLUGIN_CUASSERT(cudaMemset(qkptr, 1, qkBytes)); // input: SxBx3xNxH const T* qptr = input; const T* kptr = qptr + headSize; const T* vptr = kptr + headSize; const int startAlgo = (int) CUBLAS_GEMM_DEFAULT_TENSOR_OP; const int endAlgo = (int) CUBLAS_GEMM_ALGO15_TENSOR_OP; int best1 = startAlgo; int best2 = startAlgo; float ms1 = 1000000; float ms2 = 1000000; PLUGIN_ASSERT(smVersion >= kSM_53); for (int a = 
startAlgo; a <= endAlgo; a++) { cublasGemmAlgo_t algo = static_cast<cublasGemmAlgo_t>(a); float ms1_, ms2_; // qkptr: BxNxSxS PLUGIN_CUASSERT(cudaEventRecord(start, stream)); for (int r = 0; r < nruns; r++) { PLUGIN_CUBLASASSERT(cublasGemmStridedBatchedEx<T>(cublas, CUBLAS_OP_T, CUBLAS_OP_N, S, S, headSize, T(1.f), kptr, ldQKV, strideQKV, qptr, ldQKV, strideQKV, T(0.f), qkptr, S, omatSize, numMats, algo)); } PLUGIN_CUASSERT(cudaEventRecord(stop, stream)); PLUGIN_CUASSERT(cudaStreamSynchronize(stream)); PLUGIN_CUASSERT(cudaEventElapsedTime(&ms1_, start, stop)); if (ms1_ < ms1) { best1 = algo; ms1 = ms1_; } // pptr: BxNxSxS // output: SxBxNxH PLUGIN_CUASSERT(cudaEventRecord(start, stream)); for (int r = 0; r < nruns; r++) { PLUGIN_CUBLASASSERT(cublasGemmStridedBatchedEx<T>(cublas, CUBLAS_OP_N, CUBLAS_OP_N, headSize, S, S, 1.f, vptr, ldQKV, strideQKV, qkptr, S, omatSize, 0.f, output, ldOut, strideOut, numMats, algo)); } PLUGIN_CUASSERT(cudaEventRecord(stop, stream)); PLUGIN_CUASSERT(cudaStreamSynchronize(stream)); PLUGIN_CUASSERT(cudaEventElapsedTime(&ms2_, start, stop)); if (ms2_ < ms2) { best2 = algo; ms2 = ms2_; } } PLUGIN_CUASSERT(cudaFree(input)); PLUGIN_CUASSERT(cudaFree(qkptr)); PLUGIN_CUASSERT(cudaFree(output)); PLUGIN_CUASSERT(cudaEventDestroy(start)); PLUGIN_CUASSERT(cudaEventDestroy(stop)); PLUGIN_CUASSERT(cudaStreamDestroy(stream)); PLUGIN_CUBLASASSERT(cublasDestroy(cublas)); return std::make_pair(best1, best2); } template int computeScaledSoftmax<float>(cudaStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize, const float* input, float* output); template int computeScaledSoftmax<half>(cudaStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize, const half* input, half* output); template int computeMaskedScaledSoftmax<float>(cudaStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize, const int* maskIdx, const float* input, float* output); template int computeMaskedScaledSoftmax<half>(cudaStream_t stream, const int ld, const int B, const int N, const float rsqrtHeadSize, const int* maskIdx, const half* input, half* output); size_t MHARunner::getSerializationSize() const noexcept { return sizeof(mS) + sizeof(mB); } void MHARunner::serialize(void* buffer) const noexcept { serialize_value(&buffer, mS); serialize_value(&buffer, mB); } void MHARunner::deserialize(const void* data, size_t length) { deserialize_value(&data, &length, &mS); deserialize_value(&data, &length, &mB); setup(mS, mB); } UnfusedMHARunner::UnfusedMHARunner(const nvinfer1::DataType type, const int numHeads, const int headSize, const int sm) : MHARunner(type, numHeads, headSize) , mIsBestAlgoFound(false) , mAlgoBatchedEx1(CUBLAS_GEMM_DEFAULT_TENSOR_OP) , mAlgoBatchedEx2(CUBLAS_GEMM_DEFAULT_TENSOR_OP) , mSm(sm) { PLUGIN_CUBLASASSERT(cublasCreate(&mCublas)); } UnfusedMHARunner::~UnfusedMHARunner() { PLUGIN_CUBLASASSERT(cublasDestroy(mCublas)); } size_t UnfusedMHARunner::getSerializationSize() const noexcept { return sizeof(mAlgoBatchedEx1) + sizeof(mAlgoBatchedEx2) + MHARunner::getSerializationSize(); } void UnfusedMHARunner::serialize(void* buffer) const noexcept { serialize_value(&buffer, mAlgoBatchedEx1); serialize_value(&buffer, mAlgoBatchedEx2); MHARunner::serialize(buffer); } void UnfusedMHARunner::deserialize(const void* data, size_t length) { mIsBestAlgoFound = true; deserialize_value(&data, &length, &mAlgoBatchedEx1); deserialize_value(&data, &length, &mAlgoBatchedEx2); MHARunner::deserialize(data, length); } void 
UnfusedMHARunner::setup(const int S, const int B) { MHARunner::setup(S, B); if (mType == DataType::kHALF && !mIsBestAlgoFound) { std::tie(mAlgoBatchedEx1, mAlgoBatchedEx2) = tuneBatchedGemm(B, S, mNumHeads, mHeadSize, mSm); mIsBestAlgoFound = true; BERT_DEBUG_VALUE("QKV Plugin - Selected Algo 1 for batch gemms: ", mAlgoBatchedEx1); BERT_DEBUG_VALUE("QKV Plugin - Selected Algo 2 for batch gemms: ", mAlgoBatchedEx2); } } size_t UnfusedMHARunner::getWorkspaceSize() const { return 2UL * mWordSize * mOmatSize * mNumMats; } void UnfusedMHARunner::run(const PluginTensorDesc* inputDesc, const PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) { this->run(inputDesc[0], outputDesc[0], inputs[0], inputs[1], outputs[0], workspace, stream); } void UnfusedMHARunner::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, void* output, void* workspace, cudaStream_t stream) { const int* maskIdx = static_cast<const int*>(maskPtr); PLUGIN_CUBLASASSERT(cublasSetStream(mCublas, stream)); // Q, K, V: BxNxSxH (inputs) // Q * K': BxNxSxS (-> scratch1) // P: BxNxSxS (-> scratch2) // P * V: BxNxSxH (output) if (mType == DataType::kHALF) { CublasConfigHelper helper(mCublas); const half* qptr = static_cast<const half*>(qkvPtr); const half* kptr = qptr + mHeadSize; const half* vptr = kptr + mHeadSize; half* qkptr = static_cast<half*>(workspace); half* pptr = qkptr + mOmatSize * mNumMats; half alpha = 1.f; half beta = 0.f; PLUGIN_CUBLASASSERT(::cublasGemmStridedBatchedEx(mCublas, CUBLAS_OP_T, CUBLAS_OP_N, mS, mS, mHeadSize, &alpha, kptr, CUDA_R_16F, mLdQKV, mStrideQKV, qptr, CUDA_R_16F, mLdQKV, mStrideQKV, &beta, qkptr, CUDA_R_16F, mS, mOmatSize, mNumMats, CUDA_R_16F, static_cast<cublasGemmAlgo_t>(mAlgoBatchedEx1))); // apply softmax if (maskIdx) { // if we have a mask computeMaskedScaledSoftmax<half>(stream, mS, mB, mNumHeads, mRsqrtHeadSize, maskIdx, qkptr, pptr); } else { // if we don't have a mask computeScaledSoftmax<half>(stream, mS, mB, mNumHeads, mRsqrtHeadSize, qkptr, pptr); } // compute P*V (as V*P) PLUGIN_CUBLASASSERT(cublasGemmStridedBatchedEx(mCublas, CUBLAS_OP_N, CUBLAS_OP_N, mHeadSize, mS, mS, &alpha, vptr, CUDA_R_16F, mLdQKV, mStrideQKV, pptr, CUDA_R_16F, mS, mOmatSize, &beta, output, CUDA_R_16F, mLdOut, mStrideOut, mNumMats, CUDA_R_16F, static_cast<cublasGemmAlgo_t>(mAlgoBatchedEx2))); } else { const float* qptr = static_cast<const float*>(qkvPtr); const float* kptr = qptr + mHeadSize; const float* vptr = kptr + mHeadSize; float* qkptr = static_cast<float*>(workspace); float* pptr = qkptr + mOmatSize * mNumMats; float* outptr = static_cast<float*>(output); PLUGIN_CUBLASASSERT(cublasGemmStridedBatched<float>(mCublas, CUBLAS_OP_T, CUBLAS_OP_N, mS, mS, mHeadSize, 1.f, kptr, mLdQKV, mStrideQKV, qptr, mLdQKV, mStrideQKV, 0.f, qkptr, mS, mOmatSize, mNumMats)); // apply softmax if (maskIdx) { // if we have a mask computeMaskedScaledSoftmax<float>(stream, mS, mB, mNumHeads, mRsqrtHeadSize, maskIdx, qkptr, pptr); } else { // if we don't have a mask computeScaledSoftmax<float>(stream, mS, mB, mNumHeads, mRsqrtHeadSize, qkptr, pptr); } PLUGIN_CUBLASASSERT(cublasGemmStridedBatched<float>(mCublas, CUBLAS_OP_N, CUBLAS_OP_N, mHeadSize, mS, mS, 1.f, vptr, mLdQKV, mStrideQKV, pptr, mS, mOmatSize, 0.f, outptr, mLdOut, mStrideOut, mNumMats)); } } bool UnfusedMHARunner::isValid(int s) const { return mType != DataType::kINT8; } static inline void set_alpha(uint32_t& alpha, float norm, Data_type 
dtype) { if (dtype == DATA_TYPE_FP16) { half2 h2 = __float2half2_rn(norm); alpha = reinterpret_cast<const uint32_t&>(h2); } else if (dtype == DATA_TYPE_FP32) { alpha = reinterpret_cast<const uint32_t&>(norm); } else if (dtype == DATA_TYPE_INT32) { int32_t inorm = static_cast<int32_t>(norm); alpha = reinterpret_cast<const uint32_t&>(inorm); } else { assert(false); } } class FusedMHARunnerFP16::mhaImpl { public: mhaImpl(FusedMHARunnerFP16* interface) : interface(interface) , sm(interface->mSm) , xmmaKernel(getXMMAKernels(DATA_TYPE_FP16, sm)) , xmmas_m(0U) , xmmas_n(0U) , threads_per_cta(1U) { } ~mhaImpl() {} size_t getPackedMaskSizeInBytes() const { // check that we initialized assert(xmmas_m > 0); assert(threads_per_cta > 0); assert(interface->mB > 0); return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t); } void setup(const int S, const int B) { // TODO these implementation details might be better centralized into the XMMA code, since they are needed in // several places (also outside of this plugin) size_t warps_m{1U}; size_t warps_n{1U}; size_t warps_k{1U}; if (S == 64 || S == 96 || S == 128) { warps_m = 2; warps_n = 2; } else if (S == 384) { warps_m = 1; warps_n = 8; } else { assert(false && "Unsupporte seqlen"); } // The number of threads per CTA. threads_per_cta = warps_m * warps_n * warps_k * 32; // The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension. xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m); // The number of xmmas in the N dimension. xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n); const float scale_bmm1 = interface->mRsqrtHeadSize; const float scale_softmax = 1.f; // Seems to be only required for int8 const float scale_bmm2 = 1.f; Data_type scale_type = DATA_TYPE_FP16; set_alpha(params.scale_bmm1, scale_bmm1, scale_type); set_alpha(params.scale_softmax, scale_softmax, scale_type); set_alpha(params.scale_bmm2, scale_bmm2, scale_type); params.b = B; params.h = interface->mNumHeads; params.s = S; params.d = interface->mHeadSize; params.qkv_stride_in_bytes = get_size_in_bytes(interface->mLdQKV, DATA_TYPE_FP16); params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t); params.o_stride_in_bytes = get_size_in_bytes(interface->mLdOut, DATA_TYPE_FP16); } void run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, void* output, void* workspace, cudaStream_t stream) { params.qkv_ptr = const_cast<void*>(qkvPtr); params.packed_mask_ptr = const_cast<void*>(maskPtr); params.o_ptr = output; xmmaKernel->run(params, stream); PLUGIN_CHECK(cudaPeekAtLastError()); } bool isValid(int s) const { return xmmaKernel->isValid(s); } private: FusedMHARunnerFP16* interface; Fused_multihead_attention_params params; int sm; const FusedMultiHeadAttentionXMMAKernel* xmmaKernel; size_t xmmas_m; size_t xmmas_n; size_t threads_per_cta; }; FusedMHARunnerFP16::FusedMHARunnerFP16(const int numHeads, const int headSize, const int sm) : MHARunner(DataType::kHALF, numHeads, headSize) , mSm(sm) , pimpl(new mhaImpl(this)) { } void FusedMHARunnerFP16::setup(const int S, const int B) { MHARunner::setup(S, B); pimpl->setup(S, B); } size_t FusedMHARunnerFP16::getWorkspaceSize() const { return 0; } void FusedMHARunnerFP16::deserialize(const void* data, size_t length) { MHARunner::deserialize(data, length); setup(mS, mB); } void FusedMHARunnerFP16::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, void* output, void* 
workspace, cudaStream_t stream) { pimpl->run(inputDesc, outputDesc, qkvPtr, maskPtr, output, workspace, stream); } void FusedMHARunnerFP16::run(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) { assert(false && "not implemented"); } bool FusedMHARunnerFP16::isValid(int s) const { return pimpl->isValid(s); } // Int8 starts here: TODO refactor the duplicate stuff class FusedMHARunnerInt8::mhaImpl { public: mhaImpl(FusedMHARunnerInt8* interface) : interface(interface) , sm(interface->mSm) , xmmaKernel(getXMMAKernels(DATA_TYPE_INT8, sm)) , mDqProbs(interface->mDqProbs) , xmmas_m(0U) , xmmas_n(0U) , threads_per_cta(1U) { } ~mhaImpl() {} size_t getPackedMaskSizeInBytes() const { assert(xmmas_m > 0); assert(threads_per_cta > 0); assert(interface->mB > 0); return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t); } void setup(const int S, const int B) { size_t warps_m{1U}; size_t warps_n{1U}; size_t warps_k{1U}; if (S == 128) { warps_m = 2; warps_n = 2; } else if (S == 384) { warps_m = 1; warps_n = 8; } else { assert(false && "Unsupporte seqlen"); } // The number of threads per CTA. threads_per_cta = warps_m * warps_n * warps_k * 32; // The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension. xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m); // The number of xmmas in the N dimension. xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n); params.b = B; params.h = interface->mNumHeads; params.s = S; params.d = interface->mHeadSize; params.qkv_stride_in_bytes = get_size_in_bytes(interface->mLdQKV, DATA_TYPE_INT8); params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t); params.o_stride_in_bytes = get_size_in_bytes(interface->mLdOut, DATA_TYPE_INT8); } void run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, void* output, void* workspace, cudaStream_t stream) { float scaleQkv = inputDesc.scale; float scaleCtx = outputDesc.scale; float scaleBmm1 = scaleQkv * scaleQkv * interface->mRsqrtHeadSize; float scaleBmm2 = mDqProbs * scaleQkv / scaleCtx; float scaleSoftmax = 1.f / mDqProbs; params.scale_bmm1 = asUInt32(scaleBmm1); params.scale_bmm2 = asUInt32(scaleBmm2); params.scale_softmax = asUInt32(scaleSoftmax); params.enable_i2f_trick = -double(1 << 22) * double(scaleBmm2) <= -128.f && double(1 << 22) * double(scaleBmm2) >= 127.f; params.qkv_ptr = const_cast<void*>(qkvPtr); params.packed_mask_ptr = const_cast<void*>(maskPtr); params.o_ptr = output; xmmaKernel->run(params, stream); PLUGIN_CHECK(cudaPeekAtLastError()); } bool isValid(int s) const { return xmmaKernel->isValid(s); } private: float mDqProbs; FusedMHARunnerInt8* interface; Fused_multihead_attention_params params; int sm; const FusedMultiHeadAttentionXMMAKernel* xmmaKernel; size_t xmmas_m; size_t xmmas_n; size_t threads_per_cta; }; FusedMHARunnerInt8::FusedMHARunnerInt8(const int numHeads, const int headSize, const int sm, const float dqProbs) : MHARunner(DataType::kINT8, numHeads, headSize) , mSm(sm) , pimpl(new mhaImpl(this)) , mDqProbs(dqProbs) { } void FusedMHARunnerInt8::setup(const int S, const int B) { MHARunner::setup(S, B); pimpl->setup(S, B); } size_t FusedMHARunnerInt8::getWorkspaceSize() const { return 0; } void FusedMHARunnerInt8::deserialize(const void* data, size_t length) { MHARunner::deserialize(data, length); setup(mS, mB); } void FusedMHARunnerInt8::run(const 
PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, void* output, void* workspace, cudaStream_t stream) { pimpl->run(inputDesc, outputDesc, qkvPtr, maskPtr, output, workspace, stream); } void FusedMHARunnerInt8::run(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) { assert(false && "not implemented"); } bool FusedMHARunnerInt8::isValid(int s) const { return pimpl->isValid(s); } class FusedMHARunnerFP16v2::mhaImpl { public: mhaImpl(FusedMHARunnerFP16v2* interface) : interface(interface) , sm(interface->mSm) , xmmaKernel(getXMMAKernelsV2(DATA_TYPE_FP16, sm)) { assert((sm == kSM_72 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_87 || sm == kSM_89 || sm == kSM_90) && "Unsupported architecture"); params.clear(); } ~mhaImpl() {} size_t getPackedMaskSizeInBytes() const { // check that we initialized assert(xmmas_m > 0); assert(threads_per_cta > 0); assert(interface->mB > 0); return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t); } void setup(const int S, const int B) { // TODO these implementation details might be better centralized into the XMMA code, since they are needed in // several places (also outside of this plugin) size_t warps_m{1U}; size_t warps_n{1U}; size_t warps_k{1U}; // [MLPINF-1894] HGMMA has a different warp group. // TODO: add S==64/96/512 HGMMA support for sm==90 if (sm == kSM_90 && (S == 128 || S == 256 || S == 384)) { warps_m = 4; warps_n = 1; } else { if (S == 64 || S == 96 || S == 128) { warps_m = 2; warps_n = 2; } else if (S == 256 || S == 192) { warps_m = 1; warps_n = 4; } else if (S == 384 || S == 512) { warps_m = 1; warps_n = 8; } else { assert(false && "Unsupporte seqlen"); } } // The number of threads per CTA. threads_per_cta = warps_m * warps_n * warps_k * 32; // The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension. xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m); // The number of xmmas in the N dimension. 
xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n); const float scale_bmm1 = interface->mRsqrtHeadSize; const float scale_softmax = 1.f; // Seems to be only required for int8 const float scale_bmm2 = 1.f; Data_type scale_type = DATA_TYPE_FP16; set_alpha(params.scale_bmm1, scale_bmm1, scale_type); set_alpha(params.scale_softmax, scale_softmax, scale_type); set_alpha(params.scale_bmm2, scale_bmm2, scale_type); params.b = B; params.h = interface->mNumHeads; params.s = S; params.d = interface->mHeadSize; // mLdQKV = 3 * B * mNumHeads * mHeadSize; // mLdOut = B * mNumHeads * mHeadSize; params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(half); params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t); params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(half); } void run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, const void* cuSeqlenPtr, void* output, void* workspace, cudaStream_t stream) { params.qkv_ptr = const_cast<void*>(qkvPtr); // dummy input in V2/V3 because now we use cu_seqlens params.packed_mask_ptr = nullptr; params.o_ptr = output; params.cu_seqlens = static_cast<int*>(const_cast<void*>(cuSeqlenPtr)); xmmaKernel->run(params, stream); PLUGIN_CHECK(cudaPeekAtLastError()); } bool isValid(int s) const { return xmmaKernel->isValid(s); } private: FusedMHARunnerFP16v2* interface; Fused_multihead_attention_params_v2 params; int sm; const FusedMultiHeadAttentionXMMAKernelV2* xmmaKernel; size_t xmmas_m; size_t xmmas_n; size_t threads_per_cta; }; FusedMHARunnerFP16v2::FusedMHARunnerFP16v2(const int numHeads, const int headSize, const int sm) : MHARunner(DataType::kHALF, numHeads, headSize) , mSm(sm) , pimpl(new mhaImpl(this)) { } void FusedMHARunnerFP16v2::setup(const int S, const int B) { MHARunner::setup(S, B); pimpl->setup(S, B); } size_t FusedMHARunnerFP16v2::getWorkspaceSize() const { return 0; } void FusedMHARunnerFP16v2::deserialize(const void* data, size_t length) { MHARunner::deserialize(data, length); setup(mS, mB); } void FusedMHARunnerFP16v2::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, void* output, void* workspace, cudaStream_t stream) { assert(false && "not implemented"); // pimpl->run(inputDesc, outputDesc, qkvPtr, maskPtr, output, workspace, stream); } void FusedMHARunnerFP16v2::run(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) { pimpl->run(inputDesc[0], outputDesc[0], inputs[0], inputs[1], inputs[2], outputs[0], workspace, stream); } bool FusedMHARunnerFP16v2::isValid(int s) const { return pimpl->isValid(s); } // Int8 starts here: TODO refactor the duplicate stuff class FusedMHARunnerInt8v2::mhaImpl { public: mhaImpl(FusedMHARunnerInt8v2* interface) : interface(interface) , sm(interface->mSm) , xmmaKernel(getXMMAKernelsV2(DATA_TYPE_INT8, sm)) , mDqProbs(interface->mDqProbs) , xmmas_m(0U) , xmmas_n(0U) , threads_per_cta(1U) { assert((sm == kSM_72 || sm == kSM_75 || sm == kSM_80 || sm == kSM_86 || sm == kSM_87 || sm == kSM_89 || sm == kSM_90) && "Unsupported architecture"); params.clear(); } ~mhaImpl() {} size_t getPackedMaskSizeInBytes() const { assert(xmmas_m > 0); assert(threads_per_cta > 0); assert(interface->mB > 0); return interface->mB * xmmas_m * threads_per_cta * sizeof(uint32_t); } void setup(const int S, const int B) { 
size_t warps_m{1U}; size_t warps_n{1U}; size_t warps_k{1U}; // [MLPINF-1894] IGMMA has a different warp group. // TODO: add S==64/96 IGMMA support for sm==90 if (sm == kSM_90 && (S == 128 || S == 192 || S == 256 || S == 384 || S == 512)) { if (S == 512) { warps_m = 4; warps_n = 2; } else { warps_m = 4; warps_n = 1; } } else { if (S == 128) { warps_m = 2; warps_n = 2; } else if (S == 256 || S == 192) { warps_m = 1; warps_n = 4; } else if (S == 384 || S == 512) { warps_m = 1; warps_n = 8; } else { assert(false && "Unsupported seqlen."); } } // The number of threads per CTA. threads_per_cta = warps_m * warps_n * warps_k * 32; // The number of xmmas in the M dimension. We use one uint32_t per XMMA in the M dimension. xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m); // The number of xmmas in the N dimension. xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n); params.b = B; params.h = interface->mNumHeads; params.s = S; params.d = interface->mHeadSize; params.use_int8_scale_max = interface->mUseInt8ScaleMax; params.packed_mask_stride_in_bytes = xmmas_m * threads_per_cta * sizeof(uint32_t); params.qkv_stride_in_bytes = 3 * interface->mNumHeads * interface->mHeadSize * sizeof(int8_t); params.o_stride_in_bytes = interface->mNumHeads * interface->mHeadSize * sizeof(int8_t); } void run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, const void* cuSeqlenPtr, void* output, void* workspace, cudaStream_t stream) { float scaleQkv = inputDesc.scale; float scaleCtx = outputDesc.scale; float scaleBmm1 = scaleQkv * scaleQkv * interface->mRsqrtHeadSize; float scaleBmm2 = mDqProbs * scaleQkv / scaleCtx; float scaleSoftmax = 1.f / mDqProbs; params.scale_bmm1 = asUInt32(scaleBmm1); params.scale_bmm2 = asUInt32(scaleBmm2); params.scale_softmax = asUInt32(scaleSoftmax); params.enable_i2f_trick = -double(1 << 22) * double(scaleBmm2) <= -128.f && double(1 << 22) * double(scaleBmm2) >= 127.f; params.qkv_ptr = const_cast<void*>(qkvPtr); // dummy input in V2/V3 because now we use cu_seqlens params.packed_mask_ptr = nullptr; params.use_int8_scale_max = interface->mUseInt8ScaleMax; params.o_ptr = output; params.cu_seqlens = static_cast<int*>(const_cast<void*>(cuSeqlenPtr)); xmmaKernel->run(params, stream); PLUGIN_CHECK(cudaPeekAtLastError()); } bool isValid(int s) const { return xmmaKernel->isValid(s); } private: float mDqProbs; FusedMHARunnerInt8v2* interface; Fused_multihead_attention_params_v2 params; int sm; const FusedMultiHeadAttentionXMMAKernelV2* xmmaKernel; size_t xmmas_m; size_t xmmas_n; size_t threads_per_cta; }; FusedMHARunnerInt8v2::FusedMHARunnerInt8v2(const int numHeads, const int headSize, const int sm, const float dqProbs, bool const useInt8ScaleMax) : MHARunner(DataType::kINT8, numHeads, headSize) , mSm(sm) , pimpl(new mhaImpl(this)) , mDqProbs(dqProbs) , mUseInt8ScaleMax(useInt8ScaleMax) { } void FusedMHARunnerInt8v2::setup(const int S, const int B) { MHARunner::setup(S, B); pimpl->setup(S, B); } size_t FusedMHARunnerInt8v2::getWorkspaceSize() const { return 0; } void FusedMHARunnerInt8v2::deserialize(const void* data, size_t length) { MHARunner::deserialize(data, length); setup(mS, mB); } void FusedMHARunnerInt8v2::run(const PluginTensorDesc& inputDesc, const PluginTensorDesc& outputDesc, const void* qkvPtr, const void* maskPtr, void* output, void* workspace, cudaStream_t stream) { assert(false && "Not implemented"); } void FusedMHARunnerInt8v2::run(const nvinfer1::PluginTensorDesc* inputDesc, const nvinfer1::PluginTensorDesc* outputDesc, 
const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) { pimpl->run(inputDesc[0], outputDesc[0], inputs[0], inputs[1], inputs[2], outputs[0], workspace, stream); } bool FusedMHARunnerInt8v2::isValid(int s) const { return pimpl->isValid(s); } } // namespace bert } // namespace plugin } // namespace nvinfer1
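For the int8 v2 runner, the per-tensor scales handed to the fused kernel are derived from the input/output quantization scales and the softmax-probability dequantization scale. The sketch below mirrors the derivation in run() above and the bit cast that asUInt32() performs; the struct and function names are illustrative, not part of the plugin.

#include <cmath>
#include <cstdint>
#include <cstring>

// Bit-cast a float into the uint32_t scale slots of the kernel parameter
// struct, as asUInt32() does above (memcpy avoids strict-aliasing issues).
static uint32_t floatBitsAsU32(float v)
{
    uint32_t u;
    std::memcpy(&u, &v, sizeof(u));
    return u;
}

struct Int8MhaScales
{
    uint32_t scaleBmm1;    // applied to Q*K^T
    uint32_t scaleSoftmax; // re-quantizes the softmax probabilities
    uint32_t scaleBmm2;    // applied to P*V and the output quantization
};

// scaleQkv / scaleCtx come from the plugin tensor descriptors; dqProbs is the
// softmax-probability dequantization scale chosen when the plugin is built.
static Int8MhaScales computeInt8Scales(float scaleQkv, float scaleCtx, float dqProbs, int headSize)
{
    const float rsqrtHeadSize = 1.f / std::sqrt(static_cast<float>(headSize));
    const float scaleBmm1 = scaleQkv * scaleQkv * rsqrtHeadSize;
    const float scaleBmm2 = dqProbs * scaleQkv / scaleCtx;
    const float scaleSoftmax = 1.f / dqProbs;
    return {floatBitsAsU32(scaleBmm1), floatBitsAsU32(scaleSoftmax), floatBitsAsU32(scaleBmm2)};
}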
6f6ca6a6f8dc50f9c8985a425d50a9779a890593.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "cvconfig.h" #ifdef HAVE_CUFFT #include <hipfft.h> #include "opencv2/core/cuda/common.hpp" namespace cv { namespace gpu { namespace cudev { ////////////////////////////////////////////////////////////////////////// // mulSpectrums __global__ void mulSpectrumsKernel(const PtrStep<hipfftComplex> a, const PtrStep<hipfftComplex> b, PtrStepSz<hipfftComplex> c) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < c.cols && y < c.rows) { c.ptr(y)[x] = cuCmulf(a.ptr(y)[x], b.ptr(y)[x]); } } void mulSpectrums(const PtrStep<hipfftComplex> a, const PtrStep<hipfftComplex> b, PtrStepSz<hipfftComplex> c, hipStream_t stream) { dim3 threads(256); dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y)); hipLaunchKernelGGL(( mulSpectrumsKernel), dim3(grid), dim3(threads), 0, stream, a, b, c); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////////// // mulSpectrums_CONJ __global__ void mulSpectrumsKernel_CONJ(const PtrStep<hipfftComplex> a, const PtrStep<hipfftComplex> b, PtrStepSz<hipfftComplex> c) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < c.cols && y < c.rows) { c.ptr(y)[x] = cuCmulf(a.ptr(y)[x], cuConjf(b.ptr(y)[x])); } } void mulSpectrums_CONJ(const PtrStep<hipfftComplex> a, const PtrStep<hipfftComplex> b, PtrStepSz<hipfftComplex> c, hipStream_t stream) { dim3 threads(256); dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y)); hipLaunchKernelGGL(( mulSpectrumsKernel_CONJ), dim3(grid), dim3(threads), 0, stream, a, b, c); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////////// // mulAndScaleSpectrums __global__ void mulAndScaleSpectrumsKernel(const PtrStep<hipfftComplex> a, const PtrStep<hipfftComplex> b, float scale, PtrStepSz<hipfftComplex> c) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < c.cols && y < c.rows) { hipfftComplex v = cuCmulf(a.ptr(y)[x], b.ptr(y)[x]); c.ptr(y)[x] = make_cuFloatComplex(cuCrealf(v) * scale, cuCimagf(v) * scale); } } void mulAndScaleSpectrums(const PtrStep<hipfftComplex> a, const PtrStep<hipfftComplex> b, float scale, PtrStepSz<hipfftComplex> c, hipStream_t stream) { dim3 threads(256); dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y)); hipLaunchKernelGGL(( mulAndScaleSpectrumsKernel), dim3(grid), dim3(threads), 0, stream, a, b, scale, c); cudaSafeCall( hipGetLastError() ); if (stream) cudaSafeCall( hipDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////////// // mulAndScaleSpectrums_CONJ __global__ void mulAndScaleSpectrumsKernel_CONJ(const PtrStep<hipfftComplex> a, const PtrStep<hipfftComplex> b, float scale, PtrStepSz<hipfftComplex> c) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < c.cols && y < c.rows) { hipfftComplex v = cuCmulf(a.ptr(y)[x], cuConjf(b.ptr(y)[x])); c.ptr(y)[x] = make_cuFloatComplex(cuCrealf(v) * scale, cuCimagf(v) * scale); } } void mulAndScaleSpectrums_CONJ(const PtrStep<hipfftComplex> a, const PtrStep<hipfftComplex> b, float scale, PtrStepSz<hipfftComplex> c, hipStream_t stream) { dim3 threads(256); dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, 
threads.y)); hipLaunchKernelGGL(( mulAndScaleSpectrumsKernel_CONJ), dim3(grid), dim3(threads), 0, stream, a, b, scale, c); cudaSafeCall( hipGetLastError() ); if (stream == 0) cudaSafeCall( hipDeviceSynchronize() ); } }}} // namespace cv { namespace gpu { namespace cudev #endif // HAVE_CUFFT #endif /* CUDA_DISABLER */
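The .hip/.cu pairs in this collection differ mainly in the launch syntax and runtime-API prefixes that hipify rewrites. A minimal sketch of that mapping, written in CUDA form with the HIP equivalents noted in comments (the kernel and function names here are illustrative, not from the files above):

#include <cuda_runtime.h>

__global__ void scaleKernel(float* data, float s, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= s;
}

void launchScale(float* dData, float s, int n, cudaStream_t stream)
{
    const dim3 block(256);
    const dim3 grid((n + block.x - 1) / block.x);

    // CUDA launch syntax, as in the .cu files:
    scaleKernel<<<grid, block, 0, stream>>>(dData, s, n);

    // hipify rewrites the same launch as:
    //   hipLaunchKernelGGL((scaleKernel), dim3(grid), dim3(block), 0, stream, dData, s, n);
    // and maps cudaGetLastError/cudaDeviceSynchronize to
    // hipGetLastError/hipDeviceSynchronize, cudaStream_t to hipStream_t.

    const cudaError_t err = cudaGetLastError(); // launch-error check, as cudaSafeCall() does above
    if (err != cudaSuccess)
        return;
    if (stream == 0)
        cudaDeviceSynchronize();
}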
6f6ca6a6f8dc50f9c8985a425d50a9779a890593.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "cvconfig.h" #ifdef HAVE_CUFFT #include <cufft.h> #include "opencv2/core/cuda/common.hpp" namespace cv { namespace gpu { namespace cudev { ////////////////////////////////////////////////////////////////////////// // mulSpectrums __global__ void mulSpectrumsKernel(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < c.cols && y < c.rows) { c.ptr(y)[x] = cuCmulf(a.ptr(y)[x], b.ptr(y)[x]); } } void mulSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c, cudaStream_t stream) { dim3 threads(256); dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y)); mulSpectrumsKernel<<<grid, threads, 0, stream>>>(a, b, c); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////////// // mulSpectrums_CONJ __global__ void mulSpectrumsKernel_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < c.cols && y < c.rows) { c.ptr(y)[x] = cuCmulf(a.ptr(y)[x], cuConjf(b.ptr(y)[x])); } } void mulSpectrums_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, PtrStepSz<cufftComplex> c, cudaStream_t stream) { dim3 threads(256); dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y)); mulSpectrumsKernel_CONJ<<<grid, threads, 0, stream>>>(a, b, c); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////////// // mulAndScaleSpectrums __global__ void mulAndScaleSpectrumsKernel(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < c.cols && y < c.rows) { cufftComplex v = cuCmulf(a.ptr(y)[x], b.ptr(y)[x]); c.ptr(y)[x] = make_cuFloatComplex(cuCrealf(v) * scale, cuCimagf(v) * scale); } } void mulAndScaleSpectrums(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c, cudaStream_t stream) { dim3 threads(256); dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y)); mulAndScaleSpectrumsKernel<<<grid, threads, 0, stream>>>(a, b, scale, c); cudaSafeCall( cudaGetLastError() ); if (stream) cudaSafeCall( cudaDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////////// // mulAndScaleSpectrums_CONJ __global__ void mulAndScaleSpectrumsKernel_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < c.cols && y < c.rows) { cufftComplex v = cuCmulf(a.ptr(y)[x], cuConjf(b.ptr(y)[x])); c.ptr(y)[x] = make_cuFloatComplex(cuCrealf(v) * scale, cuCimagf(v) * scale); } } void mulAndScaleSpectrums_CONJ(const PtrStep<cufftComplex> a, const PtrStep<cufftComplex> b, float scale, PtrStepSz<cufftComplex> c, cudaStream_t stream) { dim3 threads(256); dim3 grid(divUp(c.cols, threads.x), divUp(c.rows, threads.y)); mulAndScaleSpectrumsKernel_CONJ<<<grid, threads, 0, stream>>>(a, b, scale, c); cudaSafeCall( 
cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } }}} // namespace cv { namespace gpu { namespace cudev #endif // HAVE_CUFFT #endif /* CUDA_DISABLER */
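Per element, mulAndScaleSpectrumsKernel_CONJ computes c = a * conj(b) * scale, the frequency-domain form of cross-correlation. A host-side reference of that arithmetic using the same cuComplex helpers (a sketch only; referenceMulAndScaleConj is not part of the OpenCV module):

#include <cuComplex.h>

// Reference for what each thread of mulAndScaleSpectrumsKernel_CONJ computes:
// c[i] = (a[i] * conj(b[i])) * scale. The cuComplex helpers are
// __host__ __device__, so the same expression can be checked on the CPU.
void referenceMulAndScaleConj(const cuFloatComplex* a, const cuFloatComplex* b,
                              float scale, cuFloatComplex* c, int n)
{
    for (int i = 0; i < n; ++i)
    {
        const cuFloatComplex v = cuCmulf(a[i], cuConjf(b[i]));
        c[i] = make_cuFloatComplex(cuCrealf(v) * scale, cuCimagf(v) * scale);
    }
}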
2d62478d201773646514816f9c5e42a9f19d3bd0.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "CHECK.h"
#include "d_vecAdd.h"

//use this as the size of your blocks (number of threads per block)
#define BLOCKDIM 512

__global__ void d_vecAddKernel(float * d_A, float * d_B, float * d_C, int n);

/* d_vecAdd
   Performs the vector add on the GPU (the device).
   A and B are pointers to two vectors to add together.
   The result is stored in the vector pointed to by C.
   n is the length of the vectors.
   returns the amount of time it takes to perform the vector add
*/
float d_vecAdd(float* A, float* B, float* C, int n)
{
    float gpuMsecTime;
    hipEvent_t start_gpu, stop_gpu;

    //time the sum of the two vectors
    CHECK(hipEventCreate(&start_gpu));
    CHECK(hipEventCreate(&stop_gpu));
    CHECK(hipEventRecord(start_gpu));

    //missing code goes here
    //1) create vectors on the device
    //2) copy A and B vectors into device vectors
    //3) launch the kernel
    //4) copy the result vector into the C vector
    //5) free space allocated for vectors on the device
    //Don't forget to use the CHECK macro on your cuda calls

    CHECK(hipEventRecord(stop_gpu));
    CHECK(hipEventSynchronize(stop_gpu));
    CHECK(hipEventElapsedTime(&gpuMsecTime, start_gpu, stop_gpu));
    return gpuMsecTime;
}

/* d_vecAddKernel
   This function contains the kernel code. This code will be executed
   by every thread created by the kernel launch.
   d_A and d_B are pointers to two vectors on the device to add together.
   The result is stored in the vector pointed to by d_C.
   n is the length of the vectors.
*/
__global__ void d_vecAddKernel(float * d_A, float * d_B, float * d_C, int n)
{
    //add the missing body
}
2d62478d201773646514816f9c5e42a9f19d3bd0.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include "CHECK.h"
#include "d_vecAdd.h"

//use this as the size of your blocks (number of threads per block)
#define BLOCKDIM 512

__global__ void d_vecAddKernel(float * d_A, float * d_B, float * d_C, int n);

/* d_vecAdd
   Performs the vector add on the GPU (the device).
   A and B are pointers to two vectors to add together.
   The result is stored in the vector pointed to by C.
   n is the length of the vectors.
   returns the amount of time it takes to perform the vector add
*/
float d_vecAdd(float* A, float* B, float* C, int n)
{
    float gpuMsecTime;
    cudaEvent_t start_gpu, stop_gpu;

    //time the sum of the two vectors
    CHECK(cudaEventCreate(&start_gpu));
    CHECK(cudaEventCreate(&stop_gpu));
    CHECK(cudaEventRecord(start_gpu));

    //missing code goes here
    //1) create vectors on the device
    //2) copy A and B vectors into device vectors
    //3) launch the kernel
    //4) copy the result vector into the C vector
    //5) free space allocated for vectors on the device
    //Don't forget to use the CHECK macro on your cuda calls

    CHECK(cudaEventRecord(stop_gpu));
    CHECK(cudaEventSynchronize(stop_gpu));
    CHECK(cudaEventElapsedTime(&gpuMsecTime, start_gpu, stop_gpu));
    return gpuMsecTime;
}

/* d_vecAddKernel
   This function contains the kernel code. This code will be executed
   by every thread created by the kernel launch.
   d_A and d_B are pointers to two vectors on the device to add together.
   The result is stored in the vector pointed to by d_C.
   n is the length of the vectors.
*/
__global__ void d_vecAddKernel(float * d_A, float * d_B, float * d_C, int n)
{
    //add the missing body
}
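The two d_vecAdd files above deliberately leave the device-memory handling and the kernel body as an exercise. One possible completion, written as a standalone CUDA sketch rather than the course's reference solution; names such as checkCuda, vecAdd, and BLOCK_DIM are local to the sketch, not the exercise's own CHECK/BLOCKDIM definitions.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define BLOCK_DIM 512

// Minimal error-check helper standing in for the exercise's CHECK macro.
#define checkCuda(call)                                                     \
    do {                                                                    \
        cudaError_t err = (call);                                           \
        if (err != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                     \
                    cudaGetErrorString(err), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

__global__ void vecAddKernel(const float* dA, const float* dB, float* dC, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        dC[i] = dA[i] + dB[i];
}

void vecAdd(const float* A, const float* B, float* C, int n)
{
    const size_t bytes = static_cast<size_t>(n) * sizeof(float);
    float *dA = nullptr, *dB = nullptr, *dC = nullptr;

    // 1) create vectors on the device
    checkCuda(cudaMalloc(&dA, bytes));
    checkCuda(cudaMalloc(&dB, bytes));
    checkCuda(cudaMalloc(&dC, bytes));

    // 2) copy A and B into the device vectors
    checkCuda(cudaMemcpy(dA, A, bytes, cudaMemcpyHostToDevice));
    checkCuda(cudaMemcpy(dB, B, bytes, cudaMemcpyHostToDevice));

    // 3) launch the kernel with enough blocks to cover all n elements
    const int blocks = (n + BLOCK_DIM - 1) / BLOCK_DIM;
    vecAddKernel<<<blocks, BLOCK_DIM>>>(dA, dB, dC, n);
    checkCuda(cudaGetLastError());

    // 4) copy the result back into C
    checkCuda(cudaMemcpy(C, dC, bytes, cudaMemcpyDeviceToHost));

    // 5) free the space allocated on the device
    checkCuda(cudaFree(dA));
    checkCuda(cudaFree(dB));
    checkCuda(cudaFree(dC));
}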
NCV.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved. * Third party copyrights are property of their respective owners. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $Id: $ * Ported to PCL by Koen Buys : Attention Work in progress! 
*/ #include <iostream> #include <string> #include <vector> #include <algorithm> #include "NCV.hpp" //============================================================================== // // Error handling helpers // //============================================================================== static void stdDebugOutput(const std::string &msg) { std::cout << msg; } static NCVDebugOutputHandler *debugOutputHandler = stdDebugOutput; void ncvDebugOutput(const std::string &msg) { debugOutputHandler(msg); } void ncvSetDebugOutputHandler(NCVDebugOutputHandler *func) { debugOutputHandler = func; } //============================================================================== // // Memory wrappers and helpers // //============================================================================== Ncv32u alignUp(Ncv32u what, Ncv32u alignment) { Ncv32u alignMask = alignment-1; Ncv32u inverseAlignMask = ~alignMask; Ncv32u res = (what + alignMask) & inverseAlignMask; return res; } void NCVMemPtr::clear() { ptr = NULL; memtype = NCVMemoryTypeNone; } void NCVMemSegment::clear() { begin.clear(); size = 0; } NCVStatus memSegCopyHelper(void *dst, NCVMemoryType dstType, const void *src, NCVMemoryType srcType, std::size_t sz, hipStream_t cuStream) { NCVStatus ncvStat; switch (dstType) { case NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: switch (srcType) { case NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: memcpy(dst, src, sz); ncvStat = NCV_SUCCESS; break; case NCVMemoryTypeDevice: if (cuStream != 0) { ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyDeviceToHost), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } break; case NCVMemoryTypeDevice: switch (srcType) { case NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: if (cuStream != 0) { ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyHostToDevice), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; case NCVMemoryTypeDevice: if (cuStream != 0) { ncvAssertCUDAReturn(hipMemcpyAsync(dst, src, sz, hipMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(hipMemcpy(dst, src, sz, hipMemcpyDeviceToDevice), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } return ncvStat; } NCVStatus memSegCopyHelper2D(void *dst, Ncv32u dstPitch, NCVMemoryType dstType, const void *src, Ncv32u srcPitch, NCVMemoryType srcType, Ncv32u widthbytes, Ncv32u height, hipStream_t cuStream) { NCVStatus ncvStat; switch (dstType) { case NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: switch (srcType) { case NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: for (Ncv32u i=0; i<height; i++) { memcpy((char*)dst + i * dstPitch, (char*)src + i * srcPitch, widthbytes); } ncvStat = NCV_SUCCESS; break; case NCVMemoryTypeDevice: if (cuStream != 0) { ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToHost), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } break; case NCVMemoryTypeDevice: switch (srcType) { case NCVMemoryTypeHostPageable: case 
NCVMemoryTypeHostPinned: if (cuStream != 0) { ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyHostToDevice), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; case NCVMemoryTypeDevice: if (cuStream != 0) { ncvAssertCUDAReturn(hipMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(hipMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, hipMemcpyDeviceToDevice), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } return ncvStat; } //=================================================================== // // NCVMemStackAllocator class members implementation // //=================================================================== NCVMemStackAllocator::NCVMemStackAllocator(Ncv32u alignment) : currentSize(0), _maxSize(0), allocBegin(NULL), begin(NULL), end(NULL), _memType(NCVMemoryTypeNone), _alignment(alignment), bReusesMemory(false) { NcvBool bProperAlignment = (alignment & (alignment-1)) == 0; ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: alignment not power of 2"); } NCVMemStackAllocator::NCVMemStackAllocator(NCVMemoryType memT, std::size_t capacity, Ncv32u alignment, void *reusePtr) : currentSize(0), _maxSize(0), allocBegin(NULL), _memType(memT), _alignment(alignment) { NcvBool bProperAlignment = (alignment & (alignment-1)) == 0; ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: _alignment not power of 2"); ncvAssertPrintCheck(memT != NCVMemoryTypeNone, "NCVMemStackAllocator ctor:: Incorrect allocator type"); allocBegin = NULL; if (reusePtr == NULL && capacity != 0) { bReusesMemory = false; switch (memT) { case NCVMemoryTypeDevice: ncvAssertCUDAReturn(hipMalloc(&allocBegin, capacity), ); break; case NCVMemoryTypeHostPinned: ncvAssertCUDAReturn(hipHostMalloc(&allocBegin, capacity), ); break; case NCVMemoryTypeHostPageable: allocBegin = (Ncv8u *)malloc(capacity); break; default:; } } else { bReusesMemory = true; allocBegin = (Ncv8u *)reusePtr; } if (capacity == 0) { allocBegin = (Ncv8u *)(0x1); } if (!isCounting()) { begin = allocBegin; end = begin + capacity; } } NCVMemStackAllocator::~NCVMemStackAllocator() { if (allocBegin != NULL) { ncvAssertPrintCheck(currentSize == 0, "NCVMemStackAllocator dtor:: not all objects were deallocated properly, forcing destruction"); if (!bReusesMemory && (allocBegin != (Ncv8u *)(0x1))) { switch (_memType) { case NCVMemoryTypeDevice: ncvAssertCUDAReturn(hipFree(allocBegin), ); break; case NCVMemoryTypeHostPinned: ncvAssertCUDAReturn(hipHostFree(allocBegin), ); break; case NCVMemoryTypeHostPageable: free(allocBegin); break; default:; } } allocBegin = NULL; } } NCVStatus NCVMemStackAllocator::alloc(NCVMemSegment &seg, std::size_t size) { seg.clear(); ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC); size = alignUp(size, this->_alignment); this->currentSize += size; this->_maxSize = max(this->_maxSize, this->currentSize); if (!isCounting()) { std::size_t availSize = end - begin; ncvAssertReturn(size <= availSize, NCV_ALLOCATOR_INSUFFICIENT_CAPACITY); } seg.begin.ptr = begin; seg.begin.memtype = this->_memType; seg.size = size; begin += size; return NCV_SUCCESS; } NCVStatus NCVMemStackAllocator::dealloc(NCVMemSegment &seg) { 
ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC); ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC); ncvAssertReturn(seg.begin.ptr != NULL || isCounting(), NCV_ALLOCATOR_BAD_DEALLOC); ncvAssertReturn(seg.begin.ptr == begin - seg.size, NCV_ALLOCATOR_DEALLOC_ORDER); currentSize -= seg.size; begin -= seg.size; seg.clear(); ncvAssertReturn(allocBegin <= begin, NCV_ALLOCATOR_BAD_DEALLOC); return NCV_SUCCESS; } NcvBool NCVMemStackAllocator::isInitialized(void) const { return ((this->_alignment & (this->_alignment-1)) == 0) && isCounting() || this->allocBegin != NULL; } NcvBool NCVMemStackAllocator::isCounting(void) const { return this->_memType == NCVMemoryTypeNone; } NCVMemoryType NCVMemStackAllocator::memType(void) const { return this->_memType; } Ncv32u NCVMemStackAllocator::alignment(void) const { return this->_alignment; } size_t NCVMemStackAllocator::maxSize(void) const { return this->_maxSize; } //=================================================================== // // NCVMemNativeAllocator class members implementation // //=================================================================== NCVMemNativeAllocator::NCVMemNativeAllocator(NCVMemoryType memT, Ncv32u alignment) : currentSize(0), _maxSize(0), _memType(memT), _alignment(alignment) { ncvAssertPrintReturn(memT != NCVMemoryTypeNone, "NCVMemNativeAllocator ctor:: counting not permitted for this allocator type", ); } NCVMemNativeAllocator::~NCVMemNativeAllocator() { ncvAssertPrintCheck(currentSize == 0, "NCVMemNativeAllocator dtor:: detected memory leak"); } NCVStatus NCVMemNativeAllocator::alloc(NCVMemSegment &seg, std::size_t size) { seg.clear(); ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC); switch (this->_memType) { case NCVMemoryTypeDevice: ncvAssertCUDAReturn(hipMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR); break; case NCVMemoryTypeHostPinned: ncvAssertCUDAReturn(hipHostMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR); break; case NCVMemoryTypeHostPageable: seg.begin.ptr = (Ncv8u *)malloc(size); break; default:; } this->currentSize += alignUp(size, this->_alignment); this->_maxSize = max(this->_maxSize, this->currentSize); seg.begin.memtype = this->_memType; seg.size = size; return NCV_SUCCESS; } NCVStatus NCVMemNativeAllocator::dealloc(NCVMemSegment &seg) { ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC); ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC); ncvAssertReturn(seg.begin.ptr != NULL, NCV_ALLOCATOR_BAD_DEALLOC); ncvAssertReturn(currentSize >= alignUp(seg.size, this->_alignment), NCV_ALLOCATOR_BAD_DEALLOC); currentSize -= alignUp(seg.size, this->_alignment); switch (this->_memType) { case NCVMemoryTypeDevice: ncvAssertCUDAReturn(hipFree(seg.begin.ptr), NCV_CUDA_ERROR); break; case NCVMemoryTypeHostPinned: ncvAssertCUDAReturn(hipHostFree(seg.begin.ptr), NCV_CUDA_ERROR); break; case NCVMemoryTypeHostPageable: free(seg.begin.ptr); break; default:; } seg.clear(); return NCV_SUCCESS; } NcvBool NCVMemNativeAllocator::isInitialized(void) const { return (this->_alignment != 0); } NcvBool NCVMemNativeAllocator::isCounting(void) const { return false; } NCVMemoryType NCVMemNativeAllocator::memType(void) const { return this->_memType; } Ncv32u NCVMemNativeAllocator::alignment(void) const { return this->_alignment; } size_t NCVMemNativeAllocator::maxSize(void) const { return this->_maxSize; } //=================================================================== // // Operations with rectangles // 
//=================================================================== template <class T> static NCVStatus drawRectsWrapperHost(T *h_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *h_rects, Ncv32u numRects, T color) { ncvAssertReturn(h_dst != NULL && h_rects != NULL, NCV_NULL_PTR); ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID); ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP); ncvAssertReturn(numRects != 0, NCV_SUCCESS); ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID); for (Ncv32u i=0; i<numRects; i++) { NcvRect32u rect = h_rects[i]; if (rect.x < dstWidth) { for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++) { h_dst[i*dstStride+rect.x] = color; } } if (rect.x+rect.width-1 < dstWidth) { for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++) { h_dst[i*dstStride+rect.x+rect.width-1] = color; } } if (rect.y < dstHeight) { for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++) { h_dst[rect.y*dstStride+j] = color; } } if (rect.y + rect.height - 1 < dstHeight) { for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++) { h_dst[(rect.y+rect.height-1)*dstStride+j] = color; } } } return NCV_SUCCESS; } NCVStatus ncvDrawRects_8u_host(Ncv8u *h_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *h_rects, Ncv32u numRects, Ncv8u color) { return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color); } NCVStatus ncvDrawRects_32u_host(Ncv32u *h_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *h_rects, Ncv32u numRects, Ncv32u color) { return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color); } const Ncv32u NUMTHREADS_DRAWRECTS = 32; const Ncv32u NUMTHREADS_DRAWRECTS_LOG2 = 5; template <class T> __global__ void drawRects(T *d_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *d_rects, Ncv32u numRects, T color) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; if (blockId > numRects * 4) { return; } NcvRect32u curRect = d_rects[blockId >> 2]; NcvBool bVertical = blockId & 0x1; NcvBool bTopLeft = blockId & 0x2; Ncv32u pt0x, pt0y; if (bVertical) { Ncv32u numChunks = (curRect.height + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2; pt0x = bTopLeft ? curRect.x : curRect.x + curRect.width - 1; pt0y = curRect.y; if (pt0x < dstWidth) { for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++) { Ncv32u ptY = pt0y + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x; if (ptY < pt0y + curRect.height && ptY < dstHeight) { d_dst[ptY * dstStride + pt0x] = color; } } } } else { Ncv32u numChunks = (curRect.width + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2; pt0x = curRect.x; pt0y = bTopLeft ? 
curRect.y : curRect.y + curRect.height - 1; if (pt0y < dstHeight) { for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++) { Ncv32u ptX = pt0x + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x; if (ptX < pt0x + curRect.width && ptX < dstWidth) { d_dst[pt0y * dstStride + ptX] = color; } } } } } template <class T> static NCVStatus drawRectsWrapperDevice(T *d_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *d_rects, Ncv32u numRects, T color, hipStream_t cuStream) { ncvAssertReturn(d_dst != NULL && d_rects != NULL, NCV_NULL_PTR); ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID); ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP); ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID); if (numRects == 0) { return NCV_SUCCESS; } dim3 grid(numRects * 4); dim3 block(NUMTHREADS_DRAWRECTS); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } hipLaunchKernelGGL(( drawRects<T>), dim3(grid), dim3(block), 0, 0, d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color); ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); return NCV_SUCCESS; } NCVStatus ncvDrawRects_8u_device(Ncv8u *d_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *d_rects, Ncv32u numRects, Ncv8u color, hipStream_t cuStream) { return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream); } NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *d_rects, Ncv32u numRects, Ncv32u color, hipStream_t cuStream) { return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream); }
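// --- Illustrative sketch (editor's addition, not part of the NCV sources) ---
// The stack allocator above is designed for a two-pass pattern: run the
// pipeline once against a "counting" allocator (the alignment-only
// constructor, memory type NCVMemoryTypeNone) so that maxSize() records the
// peak requirement, then build a real device-backed stack of exactly that
// capacity and run again. `runPipeline` is a hypothetical user callback.
static NCVStatus runPipeline(NCVMemStackAllocator &gpuAllocator);  // assumed user code

NCVStatus sketchTwoPassAllocation(Ncv32u devAlignment)
{
    // Pass 1: counting allocator -- alloc()/dealloc() only track sizes.
    NCVMemStackAllocator counter(devAlignment);
    NCVStatus stat = runPipeline(counter);
    ncvAssertReturn(stat == NCV_SUCCESS, stat);

    // Pass 2: allocate the measured peak once on the device, then rerun for real.
    NCVMemStackAllocator gpuStack(NCVMemoryTypeDevice, counter.maxSize(), devAlignment, NULL);
    ncvAssertReturn(gpuStack.isInitialized(), NCV_ALLOCATOR_BAD_ALLOC);
    return runPipeline(gpuStack);
}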
NCV.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (C) 2009-2010, NVIDIA Corporation, all rights reserved. * Third party copyrights are property of their respective owners. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * $Id: $ * Ported to PCL by Koen Buys : Attention Work in progress! 
*/ #include <iostream> #include <string> #include <vector> #include <algorithm> #include "NCV.hpp" //============================================================================== // // Error handling helpers // //============================================================================== static void stdDebugOutput(const std::string &msg) { std::cout << msg; } static NCVDebugOutputHandler *debugOutputHandler = stdDebugOutput; void ncvDebugOutput(const std::string &msg) { debugOutputHandler(msg); } void ncvSetDebugOutputHandler(NCVDebugOutputHandler *func) { debugOutputHandler = func; } //============================================================================== // // Memory wrappers and helpers // //============================================================================== Ncv32u alignUp(Ncv32u what, Ncv32u alignment) { Ncv32u alignMask = alignment-1; Ncv32u inverseAlignMask = ~alignMask; Ncv32u res = (what + alignMask) & inverseAlignMask; return res; } void NCVMemPtr::clear() { ptr = NULL; memtype = NCVMemoryTypeNone; } void NCVMemSegment::clear() { begin.clear(); size = 0; } NCVStatus memSegCopyHelper(void *dst, NCVMemoryType dstType, const void *src, NCVMemoryType srcType, std::size_t sz, cudaStream_t cuStream) { NCVStatus ncvStat; switch (dstType) { case NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: switch (srcType) { case NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: memcpy(dst, src, sz); ncvStat = NCV_SUCCESS; break; case NCVMemoryTypeDevice: if (cuStream != 0) { ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToHost), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } break; case NCVMemoryTypeDevice: switch (srcType) { case NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: if (cuStream != 0) { ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyHostToDevice), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; case NCVMemoryTypeDevice: if (cuStream != 0) { ncvAssertCUDAReturn(cudaMemcpyAsync(dst, src, sz, cudaMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(cudaMemcpy(dst, src, sz, cudaMemcpyDeviceToDevice), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } return ncvStat; } NCVStatus memSegCopyHelper2D(void *dst, Ncv32u dstPitch, NCVMemoryType dstType, const void *src, Ncv32u srcPitch, NCVMemoryType srcType, Ncv32u widthbytes, Ncv32u height, cudaStream_t cuStream) { NCVStatus ncvStat; switch (dstType) { case NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: switch (srcType) { case NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: for (Ncv32u i=0; i<height; i++) { memcpy((char*)dst + i * dstPitch, (char*)src + i * srcPitch, widthbytes); } ncvStat = NCV_SUCCESS; break; case NCVMemoryTypeDevice: if (cuStream != 0) { ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToHost), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } break; case NCVMemoryTypeDevice: switch (srcType) { case 
NCVMemoryTypeHostPageable: case NCVMemoryTypeHostPinned: if (cuStream != 0) { ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyHostToDevice), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; case NCVMemoryTypeDevice: if (cuStream != 0) { ncvAssertCUDAReturn(cudaMemcpy2DAsync(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToDevice, cuStream), NCV_CUDA_ERROR); } else { ncvAssertCUDAReturn(cudaMemcpy2D(dst, dstPitch, src, srcPitch, widthbytes, height, cudaMemcpyDeviceToDevice), NCV_CUDA_ERROR); } ncvStat = NCV_SUCCESS; break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } break; default: ncvStat = NCV_MEM_RESIDENCE_ERROR; } return ncvStat; } //=================================================================== // // NCVMemStackAllocator class members implementation // //=================================================================== NCVMemStackAllocator::NCVMemStackAllocator(Ncv32u alignment) : currentSize(0), _maxSize(0), allocBegin(NULL), begin(NULL), end(NULL), _memType(NCVMemoryTypeNone), _alignment(alignment), bReusesMemory(false) { NcvBool bProperAlignment = (alignment & (alignment-1)) == 0; ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: alignment not power of 2"); } NCVMemStackAllocator::NCVMemStackAllocator(NCVMemoryType memT, std::size_t capacity, Ncv32u alignment, void *reusePtr) : currentSize(0), _maxSize(0), allocBegin(NULL), _memType(memT), _alignment(alignment) { NcvBool bProperAlignment = (alignment & (alignment-1)) == 0; ncvAssertPrintCheck(bProperAlignment, "NCVMemStackAllocator ctor:: _alignment not power of 2"); ncvAssertPrintCheck(memT != NCVMemoryTypeNone, "NCVMemStackAllocator ctor:: Incorrect allocator type"); allocBegin = NULL; if (reusePtr == NULL && capacity != 0) { bReusesMemory = false; switch (memT) { case NCVMemoryTypeDevice: ncvAssertCUDAReturn(cudaMalloc(&allocBegin, capacity), ); break; case NCVMemoryTypeHostPinned: ncvAssertCUDAReturn(cudaMallocHost(&allocBegin, capacity), ); break; case NCVMemoryTypeHostPageable: allocBegin = (Ncv8u *)malloc(capacity); break; default:; } } else { bReusesMemory = true; allocBegin = (Ncv8u *)reusePtr; } if (capacity == 0) { allocBegin = (Ncv8u *)(0x1); } if (!isCounting()) { begin = allocBegin; end = begin + capacity; } } NCVMemStackAllocator::~NCVMemStackAllocator() { if (allocBegin != NULL) { ncvAssertPrintCheck(currentSize == 0, "NCVMemStackAllocator dtor:: not all objects were deallocated properly, forcing destruction"); if (!bReusesMemory && (allocBegin != (Ncv8u *)(0x1))) { switch (_memType) { case NCVMemoryTypeDevice: ncvAssertCUDAReturn(cudaFree(allocBegin), ); break; case NCVMemoryTypeHostPinned: ncvAssertCUDAReturn(cudaFreeHost(allocBegin), ); break; case NCVMemoryTypeHostPageable: free(allocBegin); break; default:; } } allocBegin = NULL; } } NCVStatus NCVMemStackAllocator::alloc(NCVMemSegment &seg, std::size_t size) { seg.clear(); ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC); size = alignUp(size, this->_alignment); this->currentSize += size; this->_maxSize = max(this->_maxSize, this->currentSize); if (!isCounting()) { std::size_t availSize = end - begin; ncvAssertReturn(size <= availSize, NCV_ALLOCATOR_INSUFFICIENT_CAPACITY); } seg.begin.ptr = begin; seg.begin.memtype = this->_memType; seg.size = size; begin += size; return NCV_SUCCESS; } NCVStatus 
NCVMemStackAllocator::dealloc(NCVMemSegment &seg) { ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC); ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC); ncvAssertReturn(seg.begin.ptr != NULL || isCounting(), NCV_ALLOCATOR_BAD_DEALLOC); ncvAssertReturn(seg.begin.ptr == begin - seg.size, NCV_ALLOCATOR_DEALLOC_ORDER); currentSize -= seg.size; begin -= seg.size; seg.clear(); ncvAssertReturn(allocBegin <= begin, NCV_ALLOCATOR_BAD_DEALLOC); return NCV_SUCCESS; } NcvBool NCVMemStackAllocator::isInitialized(void) const { return ((this->_alignment & (this->_alignment-1)) == 0) && isCounting() || this->allocBegin != NULL; } NcvBool NCVMemStackAllocator::isCounting(void) const { return this->_memType == NCVMemoryTypeNone; } NCVMemoryType NCVMemStackAllocator::memType(void) const { return this->_memType; } Ncv32u NCVMemStackAllocator::alignment(void) const { return this->_alignment; } size_t NCVMemStackAllocator::maxSize(void) const { return this->_maxSize; } //=================================================================== // // NCVMemNativeAllocator class members implementation // //=================================================================== NCVMemNativeAllocator::NCVMemNativeAllocator(NCVMemoryType memT, Ncv32u alignment) : currentSize(0), _maxSize(0), _memType(memT), _alignment(alignment) { ncvAssertPrintReturn(memT != NCVMemoryTypeNone, "NCVMemNativeAllocator ctor:: counting not permitted for this allocator type", ); } NCVMemNativeAllocator::~NCVMemNativeAllocator() { ncvAssertPrintCheck(currentSize == 0, "NCVMemNativeAllocator dtor:: detected memory leak"); } NCVStatus NCVMemNativeAllocator::alloc(NCVMemSegment &seg, std::size_t size) { seg.clear(); ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC); switch (this->_memType) { case NCVMemoryTypeDevice: ncvAssertCUDAReturn(cudaMalloc(&seg.begin.ptr, size), NCV_CUDA_ERROR); break; case NCVMemoryTypeHostPinned: ncvAssertCUDAReturn(cudaMallocHost(&seg.begin.ptr, size), NCV_CUDA_ERROR); break; case NCVMemoryTypeHostPageable: seg.begin.ptr = (Ncv8u *)malloc(size); break; default:; } this->currentSize += alignUp(size, this->_alignment); this->_maxSize = max(this->_maxSize, this->currentSize); seg.begin.memtype = this->_memType; seg.size = size; return NCV_SUCCESS; } NCVStatus NCVMemNativeAllocator::dealloc(NCVMemSegment &seg) { ncvAssertReturn(isInitialized(), NCV_ALLOCATOR_BAD_ALLOC); ncvAssertReturn(seg.begin.memtype == this->_memType, NCV_ALLOCATOR_BAD_DEALLOC); ncvAssertReturn(seg.begin.ptr != NULL, NCV_ALLOCATOR_BAD_DEALLOC); ncvAssertReturn(currentSize >= alignUp(seg.size, this->_alignment), NCV_ALLOCATOR_BAD_DEALLOC); currentSize -= alignUp(seg.size, this->_alignment); switch (this->_memType) { case NCVMemoryTypeDevice: ncvAssertCUDAReturn(cudaFree(seg.begin.ptr), NCV_CUDA_ERROR); break; case NCVMemoryTypeHostPinned: ncvAssertCUDAReturn(cudaFreeHost(seg.begin.ptr), NCV_CUDA_ERROR); break; case NCVMemoryTypeHostPageable: free(seg.begin.ptr); break; default:; } seg.clear(); return NCV_SUCCESS; } NcvBool NCVMemNativeAllocator::isInitialized(void) const { return (this->_alignment != 0); } NcvBool NCVMemNativeAllocator::isCounting(void) const { return false; } NCVMemoryType NCVMemNativeAllocator::memType(void) const { return this->_memType; } Ncv32u NCVMemNativeAllocator::alignment(void) const { return this->_alignment; } size_t NCVMemNativeAllocator::maxSize(void) const { return this->_maxSize; } //=================================================================== // // Operations with 
rectangles // //=================================================================== template <class T> static NCVStatus drawRectsWrapperHost(T *h_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *h_rects, Ncv32u numRects, T color) { ncvAssertReturn(h_dst != NULL && h_rects != NULL, NCV_NULL_PTR); ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID); ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP); ncvAssertReturn(numRects != 0, NCV_SUCCESS); ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID); for (Ncv32u i=0; i<numRects; i++) { NcvRect32u rect = h_rects[i]; if (rect.x < dstWidth) { for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++) { h_dst[i*dstStride+rect.x] = color; } } if (rect.x+rect.width-1 < dstWidth) { for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++) { h_dst[i*dstStride+rect.x+rect.width-1] = color; } } if (rect.y < dstHeight) { for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++) { h_dst[rect.y*dstStride+j] = color; } } if (rect.y + rect.height - 1 < dstHeight) { for (Ncv32u j=rect.x; j<rect.x+rect.width && j<dstWidth; j++) { h_dst[(rect.y+rect.height-1)*dstStride+j] = color; } } } return NCV_SUCCESS; } NCVStatus ncvDrawRects_8u_host(Ncv8u *h_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *h_rects, Ncv32u numRects, Ncv8u color) { return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color); } NCVStatus ncvDrawRects_32u_host(Ncv32u *h_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *h_rects, Ncv32u numRects, Ncv32u color) { return drawRectsWrapperHost(h_dst, dstStride, dstWidth, dstHeight, h_rects, numRects, color); } const Ncv32u NUMTHREADS_DRAWRECTS = 32; const Ncv32u NUMTHREADS_DRAWRECTS_LOG2 = 5; template <class T> __global__ void drawRects(T *d_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *d_rects, Ncv32u numRects, T color) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; if (blockId > numRects * 4) { return; } NcvRect32u curRect = d_rects[blockId >> 2]; NcvBool bVertical = blockId & 0x1; NcvBool bTopLeft = blockId & 0x2; Ncv32u pt0x, pt0y; if (bVertical) { Ncv32u numChunks = (curRect.height + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2; pt0x = bTopLeft ? curRect.x : curRect.x + curRect.width - 1; pt0y = curRect.y; if (pt0x < dstWidth) { for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++) { Ncv32u ptY = pt0y + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x; if (ptY < pt0y + curRect.height && ptY < dstHeight) { d_dst[ptY * dstStride + pt0x] = color; } } } } else { Ncv32u numChunks = (curRect.width + NUMTHREADS_DRAWRECTS - 1) >> NUMTHREADS_DRAWRECTS_LOG2; pt0x = curRect.x; pt0y = bTopLeft ? 
curRect.y : curRect.y + curRect.height - 1; if (pt0y < dstHeight) { for (Ncv32u chunkId = 0; chunkId < numChunks; chunkId++) { Ncv32u ptX = pt0x + chunkId * NUMTHREADS_DRAWRECTS + threadIdx.x; if (ptX < pt0x + curRect.width && ptX < dstWidth) { d_dst[pt0y * dstStride + ptX] = color; } } } } } template <class T> static NCVStatus drawRectsWrapperDevice(T *d_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *d_rects, Ncv32u numRects, T color, cudaStream_t cuStream) { ncvAssertReturn(d_dst != NULL && d_rects != NULL, NCV_NULL_PTR); ncvAssertReturn(dstWidth > 0 && dstHeight > 0, NCV_DIMENSIONS_INVALID); ncvAssertReturn(dstStride >= dstWidth, NCV_INVALID_STEP); ncvAssertReturn(numRects <= dstWidth * dstHeight, NCV_DIMENSIONS_INVALID); if (numRects == 0) { return NCV_SUCCESS; } dim3 grid(numRects * 4); dim3 block(NUMTHREADS_DRAWRECTS); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } drawRects<T><<<grid, block>>>(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color); ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR); return NCV_SUCCESS; } NCVStatus ncvDrawRects_8u_device(Ncv8u *d_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *d_rects, Ncv32u numRects, Ncv8u color, cudaStream_t cuStream) { return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream); } NCVStatus ncvDrawRects_32u_device(Ncv32u *d_dst, Ncv32u dstStride, Ncv32u dstWidth, Ncv32u dstHeight, NcvRect32u *d_rects, Ncv32u numRects, Ncv32u color, cudaStream_t cuStream) { return drawRectsWrapperDevice(d_dst, dstStride, dstWidth, dstHeight, d_rects, numRects, color, cuStream); }
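// --- Illustrative usage sketch (editor's addition; buffers and helper name are
// hypothetical, cleanup on the error paths is omitted for brevity) ------------
// Each rectangle is drawn by four blocks (left/right/top/bottom edge), which is
// why drawRectsWrapperDevice folds the grid into 2-D once numRects*4 exceeds
// 65535 and the kernel reconstructs blockId as blockIdx.y * 65535 + blockIdx.x.
NCVStatus sketchDrawTwoRects(Ncv8u *d_img, Ncv32u strideElems,
                             Ncv32u width, Ncv32u height, cudaStream_t stream)
{
    NcvRect32u h_rects[2];
    h_rects[0].x = 10;  h_rects[0].y = 10; h_rects[0].width = 64; h_rects[0].height = 48;
    h_rects[1].x = 100; h_rects[1].y = 40; h_rects[1].width = 32; h_rects[1].height = 32;

    NcvRect32u *d_rects = NULL;
    ncvAssertCUDAReturn(cudaMalloc((void**)&d_rects, sizeof(h_rects)), NCV_CUDA_ERROR);
    ncvAssertCUDAReturn(cudaMemcpy(d_rects, h_rects, sizeof(h_rects),
                                   cudaMemcpyHostToDevice), NCV_CUDA_ERROR);

    NCVStatus stat = ncvDrawRects_8u_device(d_img, strideElems, width, height,
                                            d_rects, 2, (Ncv8u)255, stream);
    cudaFree(d_rects);
    return stat;
}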
ac9ba017fd5df06665fc248febce94e54bc8196d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>

#define N (4096*4096)
#define THREADS_PER_BLOCK 512

__global__ void sumOnGpu(int *a, int *b, int *c, int n){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if(index < n){
        c[index] = a[index] + b[index];
    }
}

void sumOnCpu(int *a, int *b, int *c, int n){
    for(int i = 0; i < n; i++){
        c[i] = a[i] + b[i];
    }
}

void randomNumbers(int *a, int n){
    for(int i = 0; i < n; i++){
        a[i] = rand()%100000;
    }
}

int compare(int *a, int *b, int n){
    int pass = 1;
    for(int i = 0; i < n; i++){
        if(a[i] != b[i]){
            printf("Different values at a[%i] = %i and b[%i] = %i \n", i, a[i], i, b[i]);
            pass = 0;
        }
    }
    if (pass) printf("args are the same \n");
    else printf("args are different \n");
    return pass;
}

int main(){
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int size = N * sizeof(int);

    a = (int*)malloc(size);
    b = (int*)malloc(size);
    c = (int*)malloc(size);

    hipMalloc((void**)&d_a, size);
    hipMalloc((void**)&d_b, size);
    hipMalloc((void**)&d_c, size);

    randomNumbers(a, N);
    randomNumbers(b, N);

    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);

    clock_t timeOnGpu = clock();
    hipLaunchKernelGGL(( sumOnGpu), dim3(N/THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, d_a, d_b, d_c, N);
    printf("time on GPU %f \n", ((double)clock() - timeOnGpu)/CLOCKS_PER_SEC);

    hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);

    int* c_h;
    c_h = (int*)malloc(size);
    clock_t i = clock();
    sumOnCpu(a, b, c_h, N);
    printf("time on CPU %f \n", ((double)clock() - i)/CLOCKS_PER_SEC);

    compare(c, c_h, N);

    free(a); free(b); free(c); free(c_h);
    hipFree(d_a); hipFree(d_b); hipFree(d_c);
    return 0;
}
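// --- Timing caveat (editor's addition, illustrative) -------------------------
// The launch above is asynchronous, so clock() around the launch alone mostly
// measures launch overhead rather than kernel runtime. A sketch of event-based
// timing for the same kernel, using the grid/block arguments from main:
float sketchTimeSumOnGpu(int *d_a, int *d_b, int *d_c)
{
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    hipEventRecord(start, 0);
    hipLaunchKernelGGL(sumOnGpu, dim3(N/THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK),
                       0, 0, d_a, d_b, d_c, N);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);   // wait until the kernel has actually finished

    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return ms;                   // elapsed GPU time in milliseconds
}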
ac9ba017fd5df06665fc248febce94e54bc8196d.cu
#include "cuda_runtime.h" #include <stdlib.h> #include <stdio.h> #include <time.h> #define N (4096*4096) #define THREADS_PER_BLOCK 512 __global__ void sumOnGpu(int *a, int *b, int *c, int n){ int index = threadIdx.x + blockIdx.x * blockDim.x; if(index < n){ c[index] = a[index] + b[index]; } } void sumOnCpu(int *a, int *b, int *c, int n){ for(int i = 0; i < n; i++){ c[i] = a[i] + b[i]; } } void randomNumbers(int *a, int n){ for(int i = 0; i < n; i++){ a[i] = rand()%100000; } } int compare(int *a, int *b, int n){ int pass = 1; for(int i = 0; i < n; i++){ if(a[i] != b[i]){ printf("Different values at a[%i] = %i and b[%i] = %i \n", i, a[i], i, b[i]); pass = 0; } } if (pass) printf("args are the same \n"); else printf("args are different \n"); return pass; } int main(){ int *a, *b, *c; int *d_a, *d_b, *d_c; int size = N * sizeof(int); a = (int*)malloc(size); b = (int*)malloc(size); c = (int*)malloc(size); cudaMalloc((void**)&d_a, size); cudaMalloc((void**)&d_b, size); cudaMalloc((void**)&d_c, size); randomNumbers(a, N); randomNumbers(b, N); cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice); clock_t timeOnGpu = clock(); sumOnGpu<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N); printf("time on GPU %f \n", ((double)clock() - timeOnGpu)/CLOCKS_PER_SEC); cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost); int* c_h; c_h = (int*)malloc(size); clock_t i = clock(); sumOnCpu(a, b, c_h, N); printf("time on CPU %f \n", ((double)clock() - i)/CLOCKS_PER_SEC); compare(c, c_h, N); free(a); free(b); free(c); free(c_h); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
14125b0827efab5fd3f1cc414042afa9463f3bd4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"softMaxLayer.h" void softMaxLayer::createHandles() { checkCUDNN(cudnnCreateTensorDescriptor(&srcTensorDesc)); checkCUDNN(cudnnCreateTensorDescriptor(&dstTensorDesc)); checkCUDNN(cudnnCreateTensorDescriptor(&srcDiffTensorDesc)); checkCUDNN(cudnnCreateTensorDescriptor(&dstDiffTensorDesc)); } /*get the datasize and label*/ void softMaxLayer::GetDataSize_BatchLabel() { dataLayer* data_Layer = (dataLayer*) Layers::instanceObject()->getLayer("data"); dataSize = data_Layer->getDataSize(); srcLabel = data_Layer->getDataLabel(); } /*constructor*/ softMaxLayer::softMaxLayer(string name) { _name = name; _inputName = " "; srcData = NULL; dstData = NULL; srcDiff = NULL; diffData = NULL; devLabel = NULL; srcDiff = NULL; host_result = NULL; dataSize = 0; srcLabel = NULL; nextLayer.clear(); prevLayer.clear(); flag = 0; CorrectSize = 0; cur_correctSize = 0; configSoftMax* curConfig = (configSoftMax*) config::instanceObjtce()->getLayersByName(_name); string prevLayerName = curConfig->_input; layersBase* prev_Layer =(layersBase*) Layers::instanceObject()->getLayer(prevLayerName); batchSize = config::instanceObjtce()->get_batchSize(); inputSize = prev_Layer->getOutputSize(); nclasses = curConfig->_nclasses; lambda = curConfig->_weight_decay; outputSize = nclasses; inputAmount = prev_Layer->channels; inputImageDim = prev_Layer->height; number = prev_Layer->number; channels = prev_Layer->channels; height = prev_Layer->height; width = prev_Layer->width; host_result = (float*) MemoryMonitor::instanceObject()->cpuMallocMemory(number * channels * height * width *sizeof(float)); MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &srcDiff, number * channels * height * width * sizeof(float)); MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &devLabel, batchSize * 1 * 1 * 1 * sizeof(int)); MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &dstData, number * channels * height * width * sizeof(float)); MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &diffData, number * channels * height * width * sizeof(float)); this->createHandles(); } //deep copy constructor softMaxLayer::softMaxLayer(softMaxLayer* layer) { srcData = NULL; dstData = NULL; srcDiff = NULL; diffData = NULL; devLabel = NULL; srcDiff = NULL; host_result = NULL; dataSize = 0; srcLabel = NULL; nextLayer.clear(); prevLayer.clear(); flag = 0; CorrectSize = 0; cur_correctSize = 0; static int idx = 0; _name = layer->_name + string("_") + int_to_string(idx); idx ++; _inputName = layer->_inputName; batchSize = layer->batchSize; inputSize = layer->inputSize; nclasses = layer->nclasses; lambda = layer->lambda; outputSize = layer->outputSize; inputAmount = layer->inputAmount; inputImageDim = layer->inputImageDim; number = layer->number; channels = layer->channels; height = layer->height; width = layer->width; host_result = (float*) MemoryMonitor::instanceObject()->cpuMallocMemory(number * channels * height * width * sizeof(float)); MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &srcDiff, number * channels * height * width * sizeof(float)); MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &devLabel, batchSize * 1 * 1 * 1 * sizeof(int)); MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &dstData, number * channels * height * width * sizeof(float)); MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &diffData, number * channels * height * width * sizeof(float)); 
MemoryMonitor::instanceObject()->cpu2cpu(host_result, layer->host_result, number * channels * height * width * sizeof(float)); MemoryMonitor::instanceObject()->gpu2gpu(srcDiff, layer->srcDiff, number * channels * height * width * sizeof(float)); MemoryMonitor::instanceObject()->gpu2gpu(devLabel, layer->devLabel, batchSize * 1 * 1 * 1 * sizeof(int)); MemoryMonitor::instanceObject()->gpu2gpu(dstData, layer->dstData, number * channels * height * width * sizeof(float)); MemoryMonitor::instanceObject()->gpu2gpu(diffData, layer->diffData, number * channels * height * width * sizeof(float)); this->createHandles(); } /*classification results*/ void softMaxLayer::ClassificationResults() { if(flag == 0) { cur_correctSize = dataSize; } const int max_digit = nclasses; checkCudaErrors(hipMemcpy(host_result, dstData, number * channels * height * width * sizeof(float),hipMemcpyDeviceToHost)); int temp = ((number <= dataSize - flag) ? number : (dataSize-flag)); //printf("temp%d\n", temp); for(int i = 0; i < temp; i++) { float max = host_result[i * max_digit]; int labelIndex = 0; for(int j = 1; j < max_digit;j++) { if(max < host_result[i * max_digit + j]) { max = host_result[i * max_digit + j]; labelIndex = j; } } flag++; if(srcLabel[i] != labelIndex) --cur_correctSize; } if(flag == dataSize) { cout<< _name << " " << cur_correctSize << "/" << CorrectSize <<" "; if(cur_correctSize > CorrectSize) { CorrectSize = cur_correctSize; //saveNetWork(); } flag = 0; } } void softMaxLayer::forwardPropagation(string train_or_test) { GetDataSize_BatchLabel(); srcData = prevLayer[0]->dstData; checkCUDNN(cudnnSetTensor4dDescriptor(srcTensorDesc, cuDNN_netWork<float>::instanceObject()->GetTensorFormat(), cuDNN_netWork<float>::instanceObject()->GetDataType(), number, channels, height, width)); checkCUDNN(cudnnSetTensor4dDescriptor(dstTensorDesc, cuDNN_netWork<float>::instanceObject()->GetTensorFormat(), cuDNN_netWork<float>::instanceObject()->GetDataType(), number, channels, height, width)); float alpha = 1.0; float beta = 0.0; checkCUDNN(cudnnSoftmaxForward(cuDNN_netWork<float>::instanceObject()->GetcudnnHandle(), CUDNN_SOFTMAX_FAST, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, srcTensorDesc, srcData, &beta, dstTensorDesc, dstData)); if(train_or_test == "test" ) ClassificationResults(); } __global__ void SoftmaxLossBackprop(const int* label, int num_labels, int batch_size, float* diffData) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= batch_size) return; const int label_value = label[idx]; // For each item in the batch, decrease the result of the label's value by 1 diffData[idx * num_labels + label_value] -= 1.0f; } /*compute diff*/ void softMaxLayer::getBackPropDiffData() { checkCudaErrors(hipMemcpy(devLabel, srcLabel, batchSize * 1 * 1 * 1 * sizeof(int), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(srcDiff, dstData, number * channels * height * width * sizeof(float), hipMemcpyDeviceToDevice)); hipLaunchKernelGGL(( SoftmaxLossBackprop), dim3((batchSize + 127)/128), dim3(128), 0, 0, devLabel, nclasses, batchSize, srcDiff); hipDeviceSynchronize(); } void softMaxLayer::backwardPropagation(float Momentum) { getBackPropDiffData(); checkCUDNN(cudnnSetTensor4dDescriptor(srcDiffTensorDesc, cuDNN_netWork<float>::instanceObject()->GetTensorFormat(), cuDNN_netWork<float>::instanceObject()->GetDataType(), number, channels, height, width)); checkCUDNN(cudnnSetTensor4dDescriptor(dstDiffTensorDesc, cuDNN_netWork<float>::instanceObject()->GetTensorFormat(), cuDNN_netWork<float>::instanceObject()->GetDataType(), number, 
channels, height, width)); float alpha = 1.0f; float beta = 0.0f; /*computes the gridient of the softmax*/ checkCUDNN(cudnnSoftmaxBackward(cuDNN_netWork<float>::instanceObject()->GetcudnnHandle(), CUDNN_SOFTMAX_FAST, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, dstTensorDesc, dstData, srcDiffTensorDesc, srcDiff, &beta, dstDiffTensorDesc, diffData)); } void softMaxLayer:: destroyHandles() { checkCUDNN(cudnnDestroyTensorDescriptor(srcTensorDesc)); checkCUDNN(cudnnDestroyTensorDescriptor(dstTensorDesc)); checkCUDNN(cudnnDestroyTensorDescriptor(srcDiffTensorDesc)); checkCUDNN(cudnnDestroyTensorDescriptor(dstDiffTensorDesc)); }
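// --- Gradient being computed (editor's addition, illustrative) ---------------
// getBackPropDiffData() first copies the softmax output into srcDiff and then
// SoftmaxLossBackprop subtracts 1 at the label position, i.e. the standard
// softmax + cross-entropy gradient  dL/dz_i = p_i - [i == label].
// A host-side reference of the same update (names are hypothetical):
void softmaxLossGradCpu(const float *prob, const int *label,
                        int batch, int numClasses, float *diff)
{
    for (int b = 0; b < batch; ++b) {
        for (int c = 0; c < numClasses; ++c) {
            diff[b * numClasses + c] = prob[b * numClasses + c];   // copy probabilities
        }
        diff[b * numClasses + label[b]] -= 1.0f;                   // subtract the one-hot target
    }
}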
14125b0827efab5fd3f1cc414042afa9463f3bd4.cu
#include"softMaxLayer.h" void softMaxLayer::createHandles() { checkCUDNN(cudnnCreateTensorDescriptor(&srcTensorDesc)); checkCUDNN(cudnnCreateTensorDescriptor(&dstTensorDesc)); checkCUDNN(cudnnCreateTensorDescriptor(&srcDiffTensorDesc)); checkCUDNN(cudnnCreateTensorDescriptor(&dstDiffTensorDesc)); } /*get the datasize and label*/ void softMaxLayer::GetDataSize_BatchLabel() { dataLayer* data_Layer = (dataLayer*) Layers::instanceObject()->getLayer("data"); dataSize = data_Layer->getDataSize(); srcLabel = data_Layer->getDataLabel(); } /*constructor*/ softMaxLayer::softMaxLayer(string name) { _name = name; _inputName = " "; srcData = NULL; dstData = NULL; srcDiff = NULL; diffData = NULL; devLabel = NULL; srcDiff = NULL; host_result = NULL; dataSize = 0; srcLabel = NULL; nextLayer.clear(); prevLayer.clear(); flag = 0; CorrectSize = 0; cur_correctSize = 0; configSoftMax* curConfig = (configSoftMax*) config::instanceObjtce()->getLayersByName(_name); string prevLayerName = curConfig->_input; layersBase* prev_Layer =(layersBase*) Layers::instanceObject()->getLayer(prevLayerName); batchSize = config::instanceObjtce()->get_batchSize(); inputSize = prev_Layer->getOutputSize(); nclasses = curConfig->_nclasses; lambda = curConfig->_weight_decay; outputSize = nclasses; inputAmount = prev_Layer->channels; inputImageDim = prev_Layer->height; number = prev_Layer->number; channels = prev_Layer->channels; height = prev_Layer->height; width = prev_Layer->width; host_result = (float*) MemoryMonitor::instanceObject()->cpuMallocMemory(number * channels * height * width *sizeof(float)); MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &srcDiff, number * channels * height * width * sizeof(float)); MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &devLabel, batchSize * 1 * 1 * 1 * sizeof(int)); MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &dstData, number * channels * height * width * sizeof(float)); MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &diffData, number * channels * height * width * sizeof(float)); this->createHandles(); } //deep copy constructor softMaxLayer::softMaxLayer(softMaxLayer* layer) { srcData = NULL; dstData = NULL; srcDiff = NULL; diffData = NULL; devLabel = NULL; srcDiff = NULL; host_result = NULL; dataSize = 0; srcLabel = NULL; nextLayer.clear(); prevLayer.clear(); flag = 0; CorrectSize = 0; cur_correctSize = 0; static int idx = 0; _name = layer->_name + string("_") + int_to_string(idx); idx ++; _inputName = layer->_inputName; batchSize = layer->batchSize; inputSize = layer->inputSize; nclasses = layer->nclasses; lambda = layer->lambda; outputSize = layer->outputSize; inputAmount = layer->inputAmount; inputImageDim = layer->inputImageDim; number = layer->number; channels = layer->channels; height = layer->height; width = layer->width; host_result = (float*) MemoryMonitor::instanceObject()->cpuMallocMemory(number * channels * height * width * sizeof(float)); MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &srcDiff, number * channels * height * width * sizeof(float)); MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &devLabel, batchSize * 1 * 1 * 1 * sizeof(int)); MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &dstData, number * channels * height * width * sizeof(float)); MemoryMonitor::instanceObject()->gpuMallocMemory((void**) &diffData, number * channels * height * width * sizeof(float)); MemoryMonitor::instanceObject()->cpu2cpu(host_result, layer->host_result, number * channels * height * width * sizeof(float)); 
MemoryMonitor::instanceObject()->gpu2gpu(srcDiff, layer->srcDiff, number * channels * height * width * sizeof(float)); MemoryMonitor::instanceObject()->gpu2gpu(devLabel, layer->devLabel, batchSize * 1 * 1 * 1 * sizeof(int)); MemoryMonitor::instanceObject()->gpu2gpu(dstData, layer->dstData, number * channels * height * width * sizeof(float)); MemoryMonitor::instanceObject()->gpu2gpu(diffData, layer->diffData, number * channels * height * width * sizeof(float)); this->createHandles(); } /*classification results*/ void softMaxLayer::ClassificationResults() { if(flag == 0) { cur_correctSize = dataSize; } const int max_digit = nclasses; checkCudaErrors(cudaMemcpy(host_result, dstData, number * channels * height * width * sizeof(float),cudaMemcpyDeviceToHost)); int temp = ((number <= dataSize - flag) ? number : (dataSize-flag)); //printf("temp%d\n", temp); for(int i = 0; i < temp; i++) { float max = host_result[i * max_digit]; int labelIndex = 0; for(int j = 1; j < max_digit;j++) { if(max < host_result[i * max_digit + j]) { max = host_result[i * max_digit + j]; labelIndex = j; } } flag++; if(srcLabel[i] != labelIndex) --cur_correctSize; } if(flag == dataSize) { cout<< _name << " " << cur_correctSize << "/" << CorrectSize <<" "; if(cur_correctSize > CorrectSize) { CorrectSize = cur_correctSize; //saveNetWork(); } flag = 0; } } void softMaxLayer::forwardPropagation(string train_or_test) { GetDataSize_BatchLabel(); srcData = prevLayer[0]->dstData; checkCUDNN(cudnnSetTensor4dDescriptor(srcTensorDesc, cuDNN_netWork<float>::instanceObject()->GetTensorFormat(), cuDNN_netWork<float>::instanceObject()->GetDataType(), number, channels, height, width)); checkCUDNN(cudnnSetTensor4dDescriptor(dstTensorDesc, cuDNN_netWork<float>::instanceObject()->GetTensorFormat(), cuDNN_netWork<float>::instanceObject()->GetDataType(), number, channels, height, width)); float alpha = 1.0; float beta = 0.0; checkCUDNN(cudnnSoftmaxForward(cuDNN_netWork<float>::instanceObject()->GetcudnnHandle(), CUDNN_SOFTMAX_FAST, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, srcTensorDesc, srcData, &beta, dstTensorDesc, dstData)); if(train_or_test == "test" ) ClassificationResults(); } __global__ void SoftmaxLossBackprop(const int* label, int num_labels, int batch_size, float* diffData) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= batch_size) return; const int label_value = label[idx]; // For each item in the batch, decrease the result of the label's value by 1 diffData[idx * num_labels + label_value] -= 1.0f; } /*compute diff*/ void softMaxLayer::getBackPropDiffData() { checkCudaErrors(cudaMemcpy(devLabel, srcLabel, batchSize * 1 * 1 * 1 * sizeof(int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(srcDiff, dstData, number * channels * height * width * sizeof(float), cudaMemcpyDeviceToDevice)); SoftmaxLossBackprop<<< (batchSize + 127)/128, 128>>>(devLabel, nclasses, batchSize, srcDiff); cudaThreadSynchronize(); } void softMaxLayer::backwardPropagation(float Momentum) { getBackPropDiffData(); checkCUDNN(cudnnSetTensor4dDescriptor(srcDiffTensorDesc, cuDNN_netWork<float>::instanceObject()->GetTensorFormat(), cuDNN_netWork<float>::instanceObject()->GetDataType(), number, channels, height, width)); checkCUDNN(cudnnSetTensor4dDescriptor(dstDiffTensorDesc, cuDNN_netWork<float>::instanceObject()->GetTensorFormat(), cuDNN_netWork<float>::instanceObject()->GetDataType(), number, channels, height, width)); float alpha = 1.0f; float beta = 0.0f; /*computes the gridient of the softmax*/ 
checkCUDNN(cudnnSoftmaxBackward(cuDNN_netWork<float>::instanceObject()->GetcudnnHandle(), CUDNN_SOFTMAX_FAST, CUDNN_SOFTMAX_MODE_CHANNEL, &alpha, dstTensorDesc, dstData, srcDiffTensorDesc, srcDiff, &beta, dstDiffTensorDesc, diffData)); } void softMaxLayer:: destroyHandles() { checkCUDNN(cudnnDestroyTensorDescriptor(srcTensorDesc)); checkCUDNN(cudnnDestroyTensorDescriptor(dstTensorDesc)); checkCUDNN(cudnnDestroyTensorDescriptor(srcDiffTensorDesc)); checkCUDNN(cudnnDestroyTensorDescriptor(dstDiffTensorDesc)); }
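// --- API note (editor's addition, illustrative) -------------------------------
// cudaThreadSynchronize() in getBackPropDiffData() above is deprecated; the
// current equivalent is cudaDeviceSynchronize(), which is also what the HIP
// translation maps it to (hipDeviceSynchronize()). A common launch-check
// sketch using only standard runtime calls:
inline cudaError_t sketchCheckedLaunchSync()
{
    cudaError_t err = cudaGetLastError();   // launch-time errors (bad configuration, ...)
    if (err != cudaSuccess) {
        return err;
    }
    return cudaDeviceSynchronize();         // execution-time errors from the kernel
}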
abde0a44e7561bb3c7d0f7e223e0e9df293cc31d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright [2016] <Contributors> * \file Correation.cu * \brief Correlation operator * \author Xu Dong */ #include "./correlation-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> #define ROUND_OFF 50000 #define WARPS_PER_BLOCK 1 #define THREADS_PER_WARP 32 #define CORRELATION_CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \ } while (0) #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) namespace mshadow { namespace cuda { // == Correlation Kernel template <typename Dtype> __global__ void CorrelateData(const int nthreads, int num, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const Dtype *bottom0, const Dtype *bottom1, Dtype *top) { extern __shared__ char patch_data_char[]; Dtype *patch_data = reinterpret_cast<Dtype *>(patch_data_char); // First (upper left) position of kernel upper-left corner // in current center position of neighborhood in image 1 int x1 = blockIdx.x * stride1 + max_displacement; int y1 = blockIdx.y * stride1 + max_displacement; int item = blockIdx.z; int ch_off = threadIdx.x; // Load 3D patch into shared shared memory for (int j = 0; j < kernel_size; j++) { // HEIGHT for (int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) { // CHANNELS int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch; int idxPatchData = ji_off + ch; patch_data[idxPatchData] = bottom0[idx1]; } } } __syncthreads(); __shared__ Dtype sum[THREADS_PER_WARP * WARPS_PER_BLOCK]; // Compute correlation for (int top_channel = 0; top_channel < topchannels; top_channel++) { sum[ch_off] = 0; int s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2; int s2p = (top_channel / neighborhood_grid_width - neighborhood_grid_radius) * stride2; for (int j = 0; j < kernel_size; j++) { // HEIGHT for (int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) { // CHANNELS int x2 = x1 + s2o; int y2 = y1 + s2p; int idxPatchData = ji_off + ch; int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) * bottomchannels + ch; sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2]; } } } __syncthreads(); if (ch_off == 0) { Dtype total_sum = 0; for (int idx = 0; idx < THREADS_PER_WARP * WARPS_PER_BLOCK; idx++) { total_sum += sum[idx]; } const int sumelems = kernel_size * kernel_size * bottomchannels; const int index = ((top_channel * topheight + blockIdx.y) * topwidth) + blockIdx.x; top[index + item*topcount] = total_sum / static_cast<float>(sumelems); } // Aggregate result of different threads } } // == Correlation Backward Pass Kernel (For data1) template <typename Dtype> __global__ void CorrelateDataBackward0(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int 
neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, Dtype *bottom0diff, const Dtype *bottom1, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int n = index % bottomchannels; // channels int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos // Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; // We add round_off before_s1 the int division and subtract round_off after it, // to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1)\ / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1)\ / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 Dtype sum = 0; if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth-1) && (ymin <= topheight-1)) { xmin = max(0, xmin); xmax = min(topwidth-1, xmax); ymin = max(0, ymin); ymax = min(topheight-1, ymax); for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { // Get bottom1 data: int s2o = stride2 * o; int s2p = stride2 * p; int idxbot1 = ((item * pbottomheight + (m + s2p)) * pbottomwidth + (l + s2o))\ * bottomchannels + n; Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m+s2p,n] // Index offset for topdiff in following loops: int op = (p+neighborhood_grid_radius) * neighborhood_grid_width\ + (o + neighborhood_grid_radius); // index [o,p] int idxopoffset = (item * topchannels + op); for (int y = ymin; y <= ymax; y++) { for (int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot1tmp; } } } } } const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2+1) * bottomchannels; const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size); bottom0diff[bot0index + item * bottomcount] = sum / static_cast<float>(sumelems); } } // == Correlation Backward Pass Kernel (For Blob 1) template <typename Dtype> __global__ void CorrelateDataBackward1(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const Dtype *bottom0, Dtype *bottom1diff, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { // int l = index % bottomwidth + pad_size; //w-pos // int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos // int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels int n = index % bottomchannels; // channels int l = (index / bottomchannels) % 
bottomwidth + pad_size; // w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; Dtype sum = 0; for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { int s2o = stride2 * o; int s2p = stride2 * p; // Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, // to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1)\ / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1)\ / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - s2p) / stride1 if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) { xmin = max(0, xmin); xmax = min(topwidth-1, xmax); ymin = max(0, ymin); ymax = min(topheight-1, ymax); // Get bottom0 data: int idxbot0 = ((item * pbottomheight + (m - s2p)) \ * pbottomwidth + (l - s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot0]; // bottom1[l+s2o,m+s2p,n] // Index offset for topdiff in following loops: int op = (p+neighborhood_grid_radius) * \ neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p] int idxOpOffset = (item * topchannels + op); for (int y = ymin; y <= ymax; y++) { for (int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y)\ * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot0tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size); bottom1diff[bot1index + item * bottomcount] = sum / static_cast<float>(sumelems); } } // == Correlation Kernel Subtraction template <typename Dtype> __global__ void CorrelateDataSubtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const Dtype *bottom0, const Dtype *bottom1, Dtype *top) { CUDA_KERNEL_LOOP(index, nthreads) { int x = index % topwidth; // w-pos int y = (index / topwidth) % topheight; // h-pos int c = (index / topwidth / topheight) % topchannels; // channels // Offset of patch in image 2 int s2o = (c % neighborhood_grid_width - neighborhood_grid_radius) * stride2; int s2p = (c / neighborhood_grid_width - neighborhood_grid_radius) * stride2; // First (upper left) position of kernel center in current neighborhood in image 1 int x1 = x*stride1 + kernel_radius + max_displacement; int y1 = y*stride1 + kernel_radius + max_displacement; // Iterate through 3D patch Dtype sum = 0; for (int j = -kernel_radius; j <= kernel_radius; j++) { // HEIGHT for (int i = -kernel_radius; i <= kernel_radius; i++) { // WIDTH for (int 
l = 0; l < bottomchannels; l++) { // CHANNELS // Calculate position in image 2 int x2 = x1 + s2o; int y2 = y1 + s2p; // Indices in bottom data: (CH=l,W=x2,H=y2,N) int idx1 = ((item * bottomheight + y1 + j) * bottomwidth + x1 + i) \ * bottomchannels + l; int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) \ * bottomchannels + l; // Do the correlation: sum += fabsf(bottom0[idx1] - bottom1[idx2]); } } } const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2 + 1) * bottomchannels; top[index + item * topcount] = sum / static_cast<float>(sumelems); } } // == Correlation Backward Pass Kernel (For Blob 0) template <typename Dtype> __global__ void CorrelateDataBackward0Subtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, Dtype *bottom0diff, const Dtype *bottom0, const Dtype *bottom1, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int n = index % bottomchannels; // channels int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos // Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; int idxbot0 = ((item * pbottomheight + m) * pbottomwidth + l)\ * bottomchannels + n; // We add round_off before_s1 the int division and subtract round_off after it, // to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1)\ / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1)\ / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 Dtype sum = 0; if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth-1) && (ymin <= topheight-1)) { xmin = max(0, xmin); xmax = min(topwidth-1, xmax); ymin = max(0, ymin); ymax = min(topheight-1, ymax); for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { // Get bottom1 data: int s2o = stride2 * o; int s2p = stride2 * p; int idxbot1 = ((item * pbottomheight + (m+s2p)) * pbottomwidth\ + (l+s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot0]; Dtype bot1tmp = bottom1[idxbot1]; Dtype sign = (bot0tmp >= bot1tmp) ? 
Dtype(1.0) : Dtype(-1.0); // Index offset for topdiff in following loops: int op = (p+neighborhood_grid_radius) * neighborhood_grid_width\ + (o + neighborhood_grid_radius); // index [o,p] int idxopoffset = (item * topchannels + op); for (int y = ymin; y <= ymax; y++) { for (int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * sign; } } } } } const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2+1) * bottomchannels; const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size); bottom0diff[bot0index + item * bottomcount] = sum / static_cast<float>(sumelems); } } // == Correlation Backward Pass Kernel (For Blob 1) template <typename Dtype> __global__ void CorrelateDataBackward1Subtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const Dtype *bottom0, const Dtype *bottom1, Dtype *bottom1diff, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { // int l = index % bottomwidth + pad_size; //w-pos // int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos // int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels int n = index % bottomchannels; // channels int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; Dtype sum = 0; int idxbot1 = ((item * pbottomheight + m) * pbottomwidth + l)\ * bottomchannels + n; for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { int s2o = stride2 * o; int s2p = stride2 * p; // Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, // to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1)\ / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1)\ / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - s2p) / stride1 if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) { xmin = max(0, xmin); xmax = min(topwidth-1, xmax); ymin = max(0, ymin); ymax = min(topheight-1, ymax); // Get bottom0 data: int idxbot0 = ((item * pbottomheight + (m - s2p)) * pbottomwidth + (l - s2o))\ * bottomchannels + n; // bottom0[l+s2o,m+s2p,n] Dtype bot0tmp = bottom0[idxbot0]; Dtype bot1tmp = bottom1[idxbot1]; Dtype sign = (bot0tmp >= bot1tmp) ? 
Dtype(-1.0) : Dtype(1.0); // Index offset for topdiff in following loops: int op = (p+neighborhood_grid_radius) * \ neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p] int idxOpOffset = (item * topchannels + op); for (int y = ymin; y <= ymax; y++) { for (int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y)\ * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * sign; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size); bottom1diff[bot1index + item * bottomcount] = sum / static_cast<float>(sumelems); } } // == Forward // == Dimension rearrangement Kernel template <typename Dtype> __global__ void blob_rearrange_kernel2(const Dtype* in, Dtype* out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight) { // change shape from [batchsize,channel,y,x] to [batchsize,y,x,channel] int xy = blockIdx.x * blockDim.x + threadIdx.x; if (xy >= widthheight ) return; int ch = blockIdx.y; int n = blockIdx.z; Dtype value = in[(n * channels + ch) * widthheight + xy]; __syncthreads(); int xpad = (xy % width + padding); int ypad = (xy / width + padding); int xypad = ypad * (width + 2 * padding) + xpad; out[(n * pwidthheight + xypad) * channels + ch] = value; } template <typename Dtype> void Forward_gpu( const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data1, const Tensor<gpu, 4, Dtype> &data2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, bool is_multiply, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, hipStream_t stream, hipStream_t stream_tmp1, hipStream_t stream_tmp2) { const Dtype *bottom_data1 = data1.dptr_; const Dtype *bottom_data2 = data2.dptr_; Dtype *rbot1 = tmp1.dptr_; Dtype *rbot2 = tmp2.dptr_; Dtype *top = out.dptr_; const int bnum = data1.size(0); const int bchannels = data1.size(1); const int bheight = data1.size(2); const int bwidth = data1.size(3); const int bwidthheight = bwidth * bheight; const int topcount = top_width_ * top_height_ * top_channels_; dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK); int threads_per_block = 16; dim3 totalBlocksRearr((bwidthheight - 1) / threads_per_block + 1, bchannels, bnum); const int pwidthheight = (bwidth + 2 * pad_size_) * (bheight + 2 * pad_size_); hipLaunchKernelGGL(( blob_rearrange_kernel2<Dtype>), dim3(totalBlocksRearr), dim3(threads_per_block), 0, stream_tmp1, bottom_data1, rbot1, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight); hipLaunchKernelGGL(( blob_rearrange_kernel2<Dtype>), dim3(totalBlocksRearr), dim3(threads_per_block), 0, stream_tmp2, bottom_data2, rbot2, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight); const int num = bnum; const int channels = bchannels; const int height = bheight + 2 * pad_size_; const int width = bwidth + 2 * pad_size_; const int shared_memory_per_block = (kernel_size_ * kernel_size_) * bchannels; if (is_multiply == true) { // CorrelationLayer int topThreadCount = topcount; dim3 totalBlocksCorr(top_width_, top_height_, num); hipLaunchKernelGGL(( CorrelateData<Dtype>), dim3(totalBlocksCorr), dim3(threadsPerBlock), shared_memory_per_block * sizeof(Dtype), stream, topThreadCount, num, top_width_, top_height_, top_channels_, topcount, 
max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, kernel_size_, stride1_, stride2_, width, height, channels, rbot1, rbot2, top); CORRELATION_CUDA_CHECK(hipPeekAtLastError()); } else { // CorrelationLayer for (int n = 0; n < num; n++) { int topThreadCount = topcount; const int gridSize = (topThreadCount + kMaxThreadsPerBlock - 1)\ / kMaxThreadsPerBlock; hipLaunchKernelGGL(( CorrelateDataSubtract<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream, topThreadCount, num, n, top_width_, top_height_, top_channels_, topcount, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, channels, rbot1, rbot2, top); CORRELATION_CUDA_CHECK(hipPeekAtLastError()); } } } template <typename Dtype> void Backward_gpu( const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 4, Dtype> &in_grad1, const Tensor<gpu, 4, Dtype> &in_grad2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, bool is_multiply, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, hipStream_t stream0, hipStream_t stream1, int num, int channels, int height, int width) { // Get top diff, compute bottom diff const Dtype* top_diff = out_grad.dptr_; Dtype* bottom0_diff = in_grad1.dptr_; Dtype* bottom1_diff = in_grad2.dptr_; const Dtype* rbot1 = tmp1.dptr_; const Dtype* rbot2 = tmp2.dptr_; const int paddedheight = height + 2 * pad_size_; const int paddedwidth = width + 2 * pad_size_; const int bottomcount = channels * height * width; int botThreadCount = bottomcount; const int gridSize = (botThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; // CorrelationLayerBackward if (is_multiply == true) { // == Run kernel Backward 0 dim3 totalBlocksBackward0(width, height, channels * num); // First dim is fastest const int buffer_size_backw0 = \ (static_cast<int>(ceil(static_cast<float>(2 * kernel_radius_)\ / static_cast<float>(stride1_))) + 1) * top_channels_; // == Run kernel Backward 0 for (int n = 0; n < num; n++) { hipLaunchKernelGGL(( CorrelateDataBackward0<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream0, botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, bottom0_diff, rbot2, top_diff); CORRELATION_CUDA_CHECK(hipPeekAtLastError()); } // == Run kernel Backward 1 for (int n = 0; n < num; n++) { hipLaunchKernelGGL(( CorrelateDataBackward1<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream1, botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, rbot1, bottom1_diff, top_diff); CORRELATION_CUDA_CHECK(hipPeekAtLastError()); } } else { for (int n = 0; n < num; n++) { // Bottom0: hipLaunchKernelGGL(( CorrelateDataBackward0Subtract<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream0, botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, 
bottom0_diff, rbot1, rbot2, top_diff); CORRELATION_CUDA_CHECK(hipPeekAtLastError()); } for (int n = 0; n < num; n++) { // Bottom1: hipLaunchKernelGGL(( CorrelateDataBackward1Subtract<Dtype>), dim3(gridSize), dim3(kMaxThreadsPerBlock), 0, stream1, botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, rbot1, rbot2, bottom1_diff, top_diff); CORRELATION_CUDA_CHECK(hipPeekAtLastError()); } } } } // namespace cuda template<typename Dtype> inline void CorrelationForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data1, const Tensor<gpu, 4, Dtype> &data2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, bool is_multiply, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_ ) { hipStream_t stream = Stream<gpu>::GetStream(out.stream_); hipStream_t stream_tmp1 = Stream<gpu>::GetStream(tmp1.stream_); hipStream_t stream_tmp2 = Stream<gpu>::GetStream(tmp2.stream_); cuda::Forward_gpu(out, data1, data2, tmp1, tmp2, top_channels_, top_height_, top_width_, pad_size_, is_multiply, max_displacement_, kernel_size_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, stream, stream_tmp1, stream_tmp2); } template<typename Dtype> inline void CorrelationBackward(const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 4, Dtype> &in_grad1, const Tensor<gpu, 4, Dtype> &in_grad2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, bool is_multiply, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, int num, int channels, int height, int width ) { hipStream_t stream0 = Stream<gpu>::GetStream(in_grad1.stream_); hipStream_t stream1 = Stream<gpu>::GetStream(in_grad2.stream_); cuda::Backward_gpu(out_grad, in_grad1, in_grad2, tmp1, tmp2, top_channels_, top_height_, top_width_, pad_size_, is_multiply, max_displacement_, kernel_size_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, stream0, stream1, num, channels, height, width); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(CorrelationParam param) { return new CorrelationOp<gpu>(param); } } // namespace op } // namespace mxnet
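
Both the HIP file above and the CUDA original that follows lean on the same ROUND_OFF trick when the backward kernels clamp xmin/xmax and ymin/ymax: C/C++/CUDA integer division truncates toward zero, so adding a large positive offset before the division and subtracting it afterwards makes plain truncation behave like floor (and, with the usual "- 1 ... + 1" adjustment, like ceil) even for negative operands. The standalone sketch below is not part of either file; the helper names floor_div and ceil_div are mine, and it assumes the operands stay far below ROUND_OFF * stride so the shifted dividend never goes negative or overflows an int.

#include <cassert>
#include <cmath>
#include <cstdio>

// Same constant the correlation kernels use; it only needs to exceed any index
// magnitude that can reach the divisions below.
#define ROUND_OFF 50000

// floor(a / stride) for signed a, using only integer ops (stride > 0).
static int floor_div(int a, int stride) {
  const int round_off_s = stride * ROUND_OFF;
  return (a + round_off_s) / stride - ROUND_OFF;
}

// ceil(a / stride) for signed a, using only integer ops (stride > 0).
static int ceil_div(int a, int stride) {
  const int round_off_s = stride * ROUND_OFF;
  return (a + round_off_s - 1) / stride + 1 - ROUND_OFF;
}

int main() {
  // Spot-check against the floating-point definitions for a range of signed values.
  for (int a = -17; a <= 17; ++a) {
    for (int stride = 1; stride <= 4; ++stride) {
      assert(floor_div(a, stride) == (int)std::floor((double)a / stride));
      assert(ceil_div(a, stride) == (int)std::ceil((double)a / stride));
    }
  }
  printf("ROUND_OFF floor/ceil division matches the floating-point definitions\n");
  return 0;
}

The xmax/ymax expressions in the kernels are the floor form and the xmin/ymin expressions are the ceil form of exactly this pattern, applied to the (possibly negative) displaced positions.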
abde0a44e7561bb3c7d0f7e223e0e9df293cc31d.cu
/*! * Copyright [2016] <Contributors> * \file Correation.cu * \brief Correlation operator * \author Xu Dong */ #include "./correlation-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> #define ROUND_OFF 50000 #define WARPS_PER_BLOCK 1 #define THREADS_PER_WARP 32 #define CORRELATION_CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ } while (0) #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) namespace mshadow { namespace cuda { // == Correlation Kernel template <typename Dtype> __global__ void CorrelateData(const int nthreads, int num, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const Dtype *bottom0, const Dtype *bottom1, Dtype *top) { extern __shared__ char patch_data_char[]; Dtype *patch_data = reinterpret_cast<Dtype *>(patch_data_char); // First (upper left) position of kernel upper-left corner // in current center position of neighborhood in image 1 int x1 = blockIdx.x * stride1 + max_displacement; int y1 = blockIdx.y * stride1 + max_displacement; int item = blockIdx.z; int ch_off = threadIdx.x; // Load 3D patch into shared shared memory for (int j = 0; j < kernel_size; j++) { // HEIGHT for (int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) { // CHANNELS int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch; int idxPatchData = ji_off + ch; patch_data[idxPatchData] = bottom0[idx1]; } } } __syncthreads(); __shared__ Dtype sum[THREADS_PER_WARP * WARPS_PER_BLOCK]; // Compute correlation for (int top_channel = 0; top_channel < topchannels; top_channel++) { sum[ch_off] = 0; int s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2; int s2p = (top_channel / neighborhood_grid_width - neighborhood_grid_radius) * stride2; for (int j = 0; j < kernel_size; j++) { // HEIGHT for (int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for (int ch = ch_off; ch < bottomchannels; ch += (THREADS_PER_WARP * WARPS_PER_BLOCK)) { // CHANNELS int x2 = x1 + s2o; int y2 = y1 + s2p; int idxPatchData = ji_off + ch; int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) * bottomchannels + ch; sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2]; } } } __syncthreads(); if (ch_off == 0) { Dtype total_sum = 0; for (int idx = 0; idx < THREADS_PER_WARP * WARPS_PER_BLOCK; idx++) { total_sum += sum[idx]; } const int sumelems = kernel_size * kernel_size * bottomchannels; const int index = ((top_channel * topheight + blockIdx.y) * topwidth) + blockIdx.x; top[index + item*topcount] = total_sum / static_cast<float>(sumelems); } // Aggregate result of different threads } } // == Correlation Backward Pass Kernel (For data1) template <typename Dtype> __global__ void CorrelateDataBackward0(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, 
int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, Dtype *bottom0diff, const Dtype *bottom1, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int n = index % bottomchannels; // channels int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos // Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; // We add round_off before_s1 the int division and subtract round_off after it, // to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1)\ / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1)\ / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 Dtype sum = 0; if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth-1) && (ymin <= topheight-1)) { xmin = max(0, xmin); xmax = min(topwidth-1, xmax); ymin = max(0, ymin); ymax = min(topheight-1, ymax); for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { // Get bottom1 data: int s2o = stride2 * o; int s2p = stride2 * p; int idxbot1 = ((item * pbottomheight + (m + s2p)) * pbottomwidth + (l + s2o))\ * bottomchannels + n; Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m+s2p,n] // Index offset for topdiff in following loops: int op = (p+neighborhood_grid_radius) * neighborhood_grid_width\ + (o + neighborhood_grid_radius); // index [o,p] int idxopoffset = (item * topchannels + op); for (int y = ymin; y <= ymax; y++) { for (int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot1tmp; } } } } } const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2+1) * bottomchannels; const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size); bottom0diff[bot0index + item * bottomcount] = sum / static_cast<float>(sumelems); } } // == Correlation Backward Pass Kernel (For Blob 1) template <typename Dtype> __global__ void CorrelateDataBackward1(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const Dtype *bottom0, Dtype *bottom1diff, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { // int l = index % bottomwidth + pad_size; //w-pos // int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos // int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels int n = index % bottomchannels; // channels int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight + 
pad_size; // h-pos // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; Dtype sum = 0; for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { int s2o = stride2 * o; int s2p = stride2 * p; // Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, // to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1)\ / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1)\ / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - s2p) / stride1 if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) { xmin = max(0, xmin); xmax = min(topwidth-1, xmax); ymin = max(0, ymin); ymax = min(topheight-1, ymax); // Get bottom0 data: int idxbot0 = ((item * pbottomheight + (m - s2p)) \ * pbottomwidth + (l - s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot0]; // bottom1[l+s2o,m+s2p,n] // Index offset for topdiff in following loops: int op = (p+neighborhood_grid_radius) * \ neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p] int idxOpOffset = (item * topchannels + op); for (int y = ymin; y <= ymax; y++) { for (int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y)\ * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot0tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size); bottom1diff[bot1index + item * bottomcount] = sum / static_cast<float>(sumelems); } } // == Correlation Kernel Subtraction template <typename Dtype> __global__ void CorrelateDataSubtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const Dtype *bottom0, const Dtype *bottom1, Dtype *top) { CUDA_KERNEL_LOOP(index, nthreads) { int x = index % topwidth; // w-pos int y = (index / topwidth) % topheight; // h-pos int c = (index / topwidth / topheight) % topchannels; // channels // Offset of patch in image 2 int s2o = (c % neighborhood_grid_width - neighborhood_grid_radius) * stride2; int s2p = (c / neighborhood_grid_width - neighborhood_grid_radius) * stride2; // First (upper left) position of kernel center in current neighborhood in image 1 int x1 = x*stride1 + kernel_radius + max_displacement; int y1 = y*stride1 + kernel_radius + max_displacement; // Iterate through 3D patch Dtype sum = 0; for (int j = -kernel_radius; j <= kernel_radius; j++) { // HEIGHT for (int i = -kernel_radius; i <= kernel_radius; i++) { // WIDTH for (int l = 0; l < bottomchannels; l++) { // CHANNELS // Calculate position in image 2 int x2 = x1 + s2o; 
int y2 = y1 + s2p; // Indices in bottom data: (CH=l,W=x2,H=y2,N) int idx1 = ((item * bottomheight + y1 + j) * bottomwidth + x1 + i) \ * bottomchannels + l; int idx2 = ((item * bottomheight + y2 + j) * bottomwidth + x2 + i) \ * bottomchannels + l; // Do the correlation: sum += fabsf(bottom0[idx1] - bottom1[idx2]); } } } const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2 + 1) * bottomchannels; top[index + item * topcount] = sum / static_cast<float>(sumelems); } } // == Correlation Backward Pass Kernel (For Blob 0) template <typename Dtype> __global__ void CorrelateDataBackward0Subtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, Dtype *bottom0diff, const Dtype *bottom0, const Dtype *bottom1, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int n = index % bottomchannels; // channels int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos // Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; int idxbot0 = ((item * pbottomheight + m) * pbottomwidth + l)\ * bottomchannels + n; // We add round_off before_s1 the int division and subtract round_off after it, // to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1)\ / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1)\ / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 Dtype sum = 0; if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth-1) && (ymin <= topheight-1)) { xmin = max(0, xmin); xmax = min(topwidth-1, xmax); ymin = max(0, ymin); ymax = min(topheight-1, ymax); for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { // Get bottom1 data: int s2o = stride2 * o; int s2p = stride2 * p; int idxbot1 = ((item * pbottomheight + (m+s2p)) * pbottomwidth\ + (l+s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot0]; Dtype bot1tmp = bottom1[idxbot1]; Dtype sign = (bot0tmp >= bot1tmp) ? 
Dtype(1.0) : Dtype(-1.0); // Index offset for topdiff in following loops: int op = (p+neighborhood_grid_radius) * neighborhood_grid_width\ + (o + neighborhood_grid_radius); // index [o,p] int idxopoffset = (item * topchannels + op); for (int y = ymin; y <= ymax; y++) { for (int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * sign; } } } } } const int sumelems = (kernel_radius * 2 + 1) * (kernel_radius * 2+1) * bottomchannels; const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size); bottom0diff[bot0index + item * bottomcount] = sum / static_cast<float>(sumelems); } } // == Correlation Backward Pass Kernel (For Blob 1) template <typename Dtype> __global__ void CorrelateDataBackward1Subtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const Dtype *bottom0, const Dtype *bottom1, Dtype *bottom1diff, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { // int l = index % bottomwidth + pad_size; //w-pos // int m = (index / bottomwidth) % bottomheight + pad_size; // h-pos // int n = (index / bottomwidth / bottomheight) % bottomchannels; // channels int n = index % bottomchannels; // channels int l = (index / bottomchannels) % bottomwidth + pad_size; // w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; // h-pos // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; Dtype sum = 0; int idxbot1 = ((item * pbottomheight + m) * pbottomwidth + l)\ * bottomchannels + n; for (int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for (int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { int s2o = stride2 * o; int s2p = stride2 * p; // Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, // to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1)\ / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1)\ / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - s2p) / stride1 if (xmax >= 0 && ymax >= 0 && (xmin <= topwidth - 1) && (ymin <= topheight - 1)) { xmin = max(0, xmin); xmax = min(topwidth-1, xmax); ymin = max(0, ymin); ymax = min(topheight-1, ymax); // Get bottom0 data: int idxbot0 = ((item * pbottomheight + (m - s2p)) * pbottomwidth + (l - s2o))\ * bottomchannels + n; // bottom0[l+s2o,m+s2p,n] Dtype bot0tmp = bottom0[idxbot0]; Dtype bot1tmp = bottom1[idxbot1]; Dtype sign = (bot0tmp >= bot1tmp) ? 
Dtype(-1.0) : Dtype(1.0); // Index offset for topdiff in following loops: int op = (p+neighborhood_grid_radius) * \ neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p] int idxOpOffset = (item * topchannels + op); for (int y = ymin; y <= ymax; y++) { for (int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y)\ * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * sign; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot1index = ((n * bottomheight) + (m - pad_size)) * bottomwidth + (l - pad_size); bottom1diff[bot1index + item * bottomcount] = sum / static_cast<float>(sumelems); } } // == Forward // == Dimension rearrangement Kernel template <typename Dtype> __global__ void blob_rearrange_kernel2(const Dtype* in, Dtype* out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight) { // change shape from [batchsize,channel,y,x] to [batchsize,y,x,channel] int xy = blockIdx.x * blockDim.x + threadIdx.x; if (xy >= widthheight ) return; int ch = blockIdx.y; int n = blockIdx.z; Dtype value = in[(n * channels + ch) * widthheight + xy]; __syncthreads(); int xpad = (xy % width + padding); int ypad = (xy / width + padding); int xypad = ypad * (width + 2 * padding) + xpad; out[(n * pwidthheight + xypad) * channels + ch] = value; } template <typename Dtype> void Forward_gpu( const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data1, const Tensor<gpu, 4, Dtype> &data2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, bool is_multiply, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, cudaStream_t stream, cudaStream_t stream_tmp1, cudaStream_t stream_tmp2) { const Dtype *bottom_data1 = data1.dptr_; const Dtype *bottom_data2 = data2.dptr_; Dtype *rbot1 = tmp1.dptr_; Dtype *rbot2 = tmp2.dptr_; Dtype *top = out.dptr_; const int bnum = data1.size(0); const int bchannels = data1.size(1); const int bheight = data1.size(2); const int bwidth = data1.size(3); const int bwidthheight = bwidth * bheight; const int topcount = top_width_ * top_height_ * top_channels_; dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK); int threads_per_block = 16; dim3 totalBlocksRearr((bwidthheight - 1) / threads_per_block + 1, bchannels, bnum); const int pwidthheight = (bwidth + 2 * pad_size_) * (bheight + 2 * pad_size_); blob_rearrange_kernel2<Dtype><<<totalBlocksRearr, threads_per_block, 0, stream_tmp1>>> (bottom_data1, rbot1, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight); blob_rearrange_kernel2<Dtype><<<totalBlocksRearr, threads_per_block, 0, stream_tmp2>>> (bottom_data2, rbot2, bnum, bchannels, bwidth, bheight, bwidthheight, pad_size_, pwidthheight); const int num = bnum; const int channels = bchannels; const int height = bheight + 2 * pad_size_; const int width = bwidth + 2 * pad_size_; const int shared_memory_per_block = (kernel_size_ * kernel_size_) * bchannels; if (is_multiply == true) { // CorrelationLayer int topThreadCount = topcount; dim3 totalBlocksCorr(top_width_, top_height_, num); CorrelateData<Dtype><<<totalBlocksCorr, threadsPerBlock, shared_memory_per_block * sizeof(Dtype), stream>>>( topThreadCount, num, top_width_, top_height_, top_channels_, topcount, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, 
kernel_size_, stride1_, stride2_, width, height, channels, rbot1, rbot2, top); CORRELATION_CUDA_CHECK(cudaPeekAtLastError()); } else { // CorrelationLayer for (int n = 0; n < num; n++) { int topThreadCount = topcount; const int gridSize = (topThreadCount + kMaxThreadsPerBlock - 1)\ / kMaxThreadsPerBlock; CorrelateDataSubtract<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream>>>( topThreadCount, num, n, top_width_, top_height_, top_channels_, topcount, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, channels, rbot1, rbot2, top); CORRELATION_CUDA_CHECK(cudaPeekAtLastError()); } } } template <typename Dtype> void Backward_gpu( const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 4, Dtype> &in_grad1, const Tensor<gpu, 4, Dtype> &in_grad2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, bool is_multiply, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, cudaStream_t stream0, cudaStream_t stream1, int num, int channels, int height, int width) { // Get top diff, compute bottom diff const Dtype* top_diff = out_grad.dptr_; Dtype* bottom0_diff = in_grad1.dptr_; Dtype* bottom1_diff = in_grad2.dptr_; const Dtype* rbot1 = tmp1.dptr_; const Dtype* rbot2 = tmp2.dptr_; const int paddedheight = height + 2 * pad_size_; const int paddedwidth = width + 2 * pad_size_; const int bottomcount = channels * height * width; int botThreadCount = bottomcount; const int gridSize = (botThreadCount + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; // CorrelationLayerBackward if (is_multiply == true) { // == Run kernel Backward 0 dim3 totalBlocksBackward0(width, height, channels * num); // First dim is fastest const int buffer_size_backw0 = \ (static_cast<int>(ceil(static_cast<float>(2 * kernel_radius_)\ / static_cast<float>(stride1_))) + 1) * top_channels_; // == Run kernel Backward 0 for (int n = 0; n < num; n++) { CorrelateDataBackward0<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream0>>>( botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, bottom0_diff, rbot2, top_diff); CORRELATION_CUDA_CHECK(cudaPeekAtLastError()); } // == Run kernel Backward 1 for (int n = 0; n < num; n++) { CorrelateDataBackward1<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream1>>>( botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, rbot1, bottom1_diff, top_diff); CORRELATION_CUDA_CHECK(cudaPeekAtLastError()); } } else { for (int n = 0; n < num; n++) { // Bottom0: CorrelateDataBackward0Subtract<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, stream0>>>( botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, bottom0_diff, rbot1, rbot2, top_diff); CORRELATION_CUDA_CHECK(cudaPeekAtLastError()); } for (int n = 0; n < num; n++) { // Bottom1: CorrelateDataBackward1Subtract<Dtype><<<gridSize, kMaxThreadsPerBlock, 0, 
stream1>>>( botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, rbot1, rbot2, bottom1_diff, top_diff); CORRELATION_CUDA_CHECK(cudaPeekAtLastError()); } } } } // namespace cuda template<typename Dtype> inline void CorrelationForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data1, const Tensor<gpu, 4, Dtype> &data2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, bool is_multiply, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_ ) { cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); cudaStream_t stream_tmp1 = Stream<gpu>::GetStream(tmp1.stream_); cudaStream_t stream_tmp2 = Stream<gpu>::GetStream(tmp2.stream_); cuda::Forward_gpu(out, data1, data2, tmp1, tmp2, top_channels_, top_height_, top_width_, pad_size_, is_multiply, max_displacement_, kernel_size_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, stream, stream_tmp1, stream_tmp2); } template<typename Dtype> inline void CorrelationBackward(const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 4, Dtype> &in_grad1, const Tensor<gpu, 4, Dtype> &in_grad2, const Tensor<gpu, 4, Dtype> &tmp1, const Tensor<gpu, 4, Dtype> &tmp2, int top_channels_, int top_height_, int top_width_, int pad_size_, bool is_multiply, int max_displacement_, int kernel_size_, int neighborhood_grid_radius_, int neighborhood_grid_width_, int kernel_radius_, int stride1_, int stride2_, int num, int channels, int height, int width ) { cudaStream_t stream0 = Stream<gpu>::GetStream(in_grad1.stream_); cudaStream_t stream1 = Stream<gpu>::GetStream(in_grad2.stream_); cuda::Backward_gpu(out_grad, in_grad1, in_grad2, tmp1, tmp2, top_channels_, top_height_, top_width_, pad_size_, is_multiply, max_displacement_, kernel_size_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, stream0, stream1, num, channels, height, width); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(CorrelationParam param) { return new CorrelationOp<gpu>(param); } } // namespace op } // namespace mxnet
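
The forward kernels above turn a top (output) channel index into a 2-D displacement into image 2, and the backward kernels invert that mapping when they index topdiff. The host-only sketch below replays both directions of that arithmetic with made-up parameter values (neighborhood_grid_radius = 2, stride2 = 1); in the real operator these come from CorrelationParam via correlation-inl.h.

#include <cassert>
#include <cstdio>

int main() {
  // Hypothetical values for illustration only.
  const int neighborhood_grid_radius = 2;
  const int neighborhood_grid_width = 2 * neighborhood_grid_radius + 1;
  const int stride2 = 1;
  const int topchannels = neighborhood_grid_width * neighborhood_grid_width;

  for (int c = 0; c < topchannels; ++c) {
    // Forward mapping (CorrelateData / CorrelateDataSubtract): channel -> displacement.
    const int s2o = (c % neighborhood_grid_width - neighborhood_grid_radius) * stride2;
    const int s2p = (c / neighborhood_grid_width - neighborhood_grid_radius) * stride2;

    // Inverse mapping (backward kernels): offsets (o, p) -> channel index "op".
    const int o = s2o / stride2;
    const int p = s2p / stride2;
    const int op = (p + neighborhood_grid_radius) * neighborhood_grid_width
                 + (o + neighborhood_grid_radius);

    assert(op == c);  // the two mappings are exact inverses
    printf("top channel %2d -> (s2o=%+d, s2p=%+d), recovered op=%2d\n", c, s2o, s2p, op);
  }
  return 0;
}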
4c1342890b2ded55728befb3bf014dd769611e44.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "reference_calc.h" #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. float result = 0; for (int filter_r = -filterWidth / 2; filter_r <= filterWidth / 2; ++filter_r) { for (int filter_c = -filterWidth / 2; filter_c <= filterWidth / 2; ++filter_c) { //Find the global image position for this filter position //clamp to boundary of the image int image_r = min(max(thread_2D_pos.y + filter_r, 0), static_cast<int>(numRows - 1)); int image_c = min(max(thread_2D_pos.x + filter_c, 0), static_cast<int>(numCols - 1)); float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]); float filter_value = filter[(filter_r + filterWidth / 2) * filterWidth + filter_c + filterWidth / 2]; result += image_value * filter_value; } } outputChannel[thread_2D_pos.y * numCols + thread_2D_pos.x] = result; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x; greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y; blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize = dim3(128, 5, 1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. 
  const dim3 gridSize = dim3(ceil(numCols / 128.0), ceil(numRows / 5.0), 1);

  //TODO: Launch a kernel for separating the RGBA image into different color channels
  hipLaunchKernelGGL((separateChannels), dim3(gridSize), dim3(blockSize), 0, 0,
                     d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);

  // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
  // launching your kernel to make sure that you didn't make any mistakes.
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());

  //TODO: Call your convolution kernel here 3 times, once for each color channel.
  hipLaunchKernelGGL((gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0,
                     d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
  hipLaunchKernelGGL((gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0,
                     d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
  hipLaunchKernelGGL((gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0,
                     d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);

  // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
  // launching your kernel to make sure that you didn't make any mistakes.
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());

  // Now we recombine your results. We take care of launching this kernel for you.
  //
  // NOTE: This kernel launch depends on the gridSize and blockSize variables,
  // which you must set yourself.
  hipLaunchKernelGGL((recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0,
                     d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA,
                     numRows, numCols);
  hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}

//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
  checkCudaErrors(hipFree(d_red));
  checkCudaErrors(hipFree(d_green));
  checkCudaErrors(hipFree(d_blue));
}
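
The grid size in the file above is computed with a floating-point ceil so that the 128x5 thread blocks cover every pixel; the early-return bounds check inside each kernel then discards the threads that land past the right and bottom edges. A minimal host-side sketch of the same sizing arithmetic, with made-up image dimensions and the same illustrative block shape:

#include <cstdio>

// Integer ceil-division; equivalent to the ceil(numCols / 128.0) pattern above,
// without the detour through floating point.
static unsigned int div_up(size_t n, unsigned int block) {
  return (unsigned int)((n + block - 1) / block);
}

int main() {
  // Made-up image size; in the homework, numRows / numCols come from the harness.
  const size_t numRows = 313;
  const size_t numCols = 557;
  const unsigned int blockX = 128, blockY = 5;   // mirrors dim3(128, 5, 1) above

  const unsigned int gridX = div_up(numCols, blockX);
  const unsigned int gridY = div_up(numRows, blockY);

  // The launch tiles a slightly larger rectangle than the image; the
  // "if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return;"
  // check in each kernel discards the overhang.
  printf("%u x %u blocks -> %zu x %zu threads for a %zu x %zu image\n",
         gridX, gridY, (size_t)gridX * blockX, (size_t)gridY * blockY,
         numCols, numRows);
  return 0;
}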
4c1342890b2ded55728befb3bf014dd769611e44.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "reference_calc.h" #include "utils.h" __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. float result = 0; for (int filter_r = -filterWidth / 2; filter_r <= filterWidth / 2; ++filter_r) { for (int filter_c = -filterWidth / 2; filter_c <= filterWidth / 2; ++filter_c) { //Find the global image position for this filter position //clamp to boundary of the image int image_r = min(max(thread_2D_pos.y + filter_r, 0), static_cast<int>(numRows - 1)); int image_c = min(max(thread_2D_pos.x + filter_c, 0), static_cast<int>(numCols - 1)); float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]); float filter_value = filter[(filter_r + filterWidth / 2) * filterWidth + filter_c + filterWidth / 2]; result += image_value * filter_value; } } outputChannel[thread_2D_pos.y * numCols + thread_2D_pos.x] = result; } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x; greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y; blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize = dim3(128, 5, 1); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. 
const dim3 gridSize = dim3(ceil(numCols / 128.0), ceil(numRows / 5.0), 1); //TODO: Launch a kernel for separating the RGBA image into different color channels separateChannels <<<gridSize, blockSize >>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. gaussian_blur <<<gridSize, blockSize >>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur <<<gridSize, blockSize >>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth); gaussian_blur <<<gridSize, blockSize >>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels <<<gridSize, blockSize >>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); }
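The comments in the homework file above point at a shared-memory version as the follow-up optimization, but the file stops at the global-memory kernel. The sketch below is one hedged way to tile it: it reuses the same inputChannel/outputChannel/filter signature, but the 16x16 tile, the MAX_FILTER_WIDTH bound, and the assumption that the kernel is launched with 16x16 blocks are illustrative choices, not part of the assignment code.

// Hypothetical shared-memory variant (not from the original assignment).
// Assumes filterWidth <= MAX_FILTER_WIDTH and a 16x16 thread block.
#define TILE_W 16
#define MAX_FILTER_WIDTH 9

__global__
void gaussian_blur_shared(const unsigned char* const inputChannel,
                          unsigned char* const outputChannel,
                          int numRows, int numCols,
                          const float* const filter, const int filterWidth)
{
  const int halo = filterWidth / 2;
  __shared__ unsigned char tile[TILE_W + MAX_FILTER_WIDTH - 1]
                               [TILE_W + MAX_FILTER_WIDTH - 1];

  const int col = blockIdx.x * TILE_W + threadIdx.x;
  const int row = blockIdx.y * TILE_W + threadIdx.y;

  // Cooperatively stage the tile plus its halo, clamping reads to the image bounds.
  for (int dy = threadIdx.y; dy < TILE_W + 2 * halo; dy += blockDim.y) {
    for (int dx = threadIdx.x; dx < TILE_W + 2 * halo; dx += blockDim.x) {
      int r = (int)(blockIdx.y * TILE_W) + dy - halo;
      int c = (int)(blockIdx.x * TILE_W) + dx - halo;
      r = min(max(r, 0), numRows - 1);
      c = min(max(c, 0), numCols - 1);
      tile[dy][dx] = inputChannel[r * numCols + c];
    }
  }
  __syncthreads();

  if (row >= numRows || col >= numCols) return;

  // Same weighted average as the global-memory kernel, read from shared memory.
  float result = 0.f;
  for (int fr = 0; fr < filterWidth; ++fr)
    for (int fc = 0; fc < filterWidth; ++fc)
      result += filter[fr * filterWidth + fc] *
                static_cast<float>(tile[threadIdx.y + fr][threadIdx.x + fc]);

  outputChannel[row * numCols + col] = static_cast<unsigned char>(result);
}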
0959a8c7bf3a5ee61f753de787cf88acb63ffdf6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> // CUDA runtime #include <hip/hip_runtime.h> #include <rocblas.h> #include "dense_help_func.hpp" // cal offset from row col and ld , in row-major matrix, ld is the width of the matrix #define OFFSET(row, col, ld) ((row) * (ld) + (col)) // transfer float4 #define FETCH_FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0]) template < const int BLOCK_SIZE_M, // width of block of C that each thread block calculate const int BLOCK_SIZE_K, // height of block of A that each thread block load into shared memory const int BLOCK_SIZE_N, // height of block of C that each thread block calculate const int THREAD_SIZE_Y, // height of block of C that each thread calculate const int THREAD_SIZE_X, // width of block of C that each thread calculate const bool ENABLE_DOUBLE_BUFFER // whether enable double buffering or not > __global__ void MatrixMulCUDAEncoding( float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, const int K, const int N) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // size of thread block const int bszx = BLOCK_SIZE_N / THREAD_SIZE_X; const int bszy = BLOCK_SIZE_M / THREAD_SIZE_Y; const int THREAD_NUM_PER_BLOCK = bszy * bszx; // thread id const int tid = ty * bszx + tx; // shared memory __shared__ float As[BLOCK_SIZE_M][BLOCK_SIZE_K]; // avoid bank conflict __shared__ float Bs[BLOCK_SIZE_K][BLOCK_SIZE_N]; int ROW_PTR[7] = {1,4,3,5,7,0,2}; // add encoding // registers for C float accum[THREAD_SIZE_Y][THREAD_SIZE_X] = {0}; // registers for A and B float frag_a[THREAD_SIZE_Y]; float frag_b[THREAD_SIZE_X]; // threads needed to load one row of tile // / 4 is because float4 is used const int A_TILE_THREAD_PER_ROW = BLOCK_SIZE_K / 4; const int B_TILE_THREAD_PER_ROW = BLOCK_SIZE_N / 4; // row number and col number that needs to be loaded by this thread const int A_TILE_ROW_START = tid / A_TILE_THREAD_PER_ROW; const int B_TILE_ROW_START = tid / B_TILE_THREAD_PER_ROW; const int A_TILE_COL = tid % A_TILE_THREAD_PER_ROW * 4; const int B_TILE_COL = tid % B_TILE_THREAD_PER_ROW * 4; // row stride that thread uses to load multiple rows of a tile const int A_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / A_TILE_THREAD_PER_ROW; const int B_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / B_TILE_THREAD_PER_ROW; // can not unroll since K can not be determined at this point for (int tile_idx = 0 ; tile_idx < K ; tile_idx += BLOCK_SIZE_K) { // load A from global memory to shared memory #pragma unroll for ( int i = 0 ; i < BLOCK_SIZE_M ; i += A_TILE_ROW_STRIDE) { FETCH_FLOAT4(As[A_TILE_ROW_START + i][A_TILE_COL]) = FETCH_FLOAT4(A[OFFSET( BLOCK_SIZE_M * by + A_TILE_ROW_START + i, // row A_TILE_COL + tile_idx, // col K )]); } // load B from global memory to shared memory #pragma unroll for ( int i = 0 ; i < BLOCK_SIZE_K; i += B_TILE_ROW_STRIDE) { FETCH_FLOAT4(Bs[B_TILE_ROW_START + i][B_TILE_COL]) = FETCH_FLOAT4(B[OFFSET( tile_idx + B_TILE_ROW_START + i, // row B_TILE_COL + BLOCK_SIZE_N * bx, // col K )]); } __syncthreads(); // compute c #pragma unroll for (int k = 0; k < BLOCK_SIZE_K; ++ k) { // load A from shared memory to register //#pragma unroll //for(int i=0; i<4; i++){ // ROW_PTR[i] = i*BLOCK_SIZE_K+k; //} #pragma unroll for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) { // add one decoding overhead frag_a[thread_y] = reinterpret_cast<float*>(As)[ROW_PTR[((ty * THREAD_SIZE_Y + thread_y) * BLOCK_SIZE_K + k)%7]]; } // 
load B from shared memory to register #pragma unroll for (int thread_x = 0; thread_x < THREAD_SIZE_X; thread_x += 4) { FETCH_FLOAT4(frag_b[thread_x]) = FETCH_FLOAT4(Bs[k][THREAD_SIZE_X * tx + thread_x]); } #pragma unroll for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) { #pragma unroll for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) { accum[thread_y][thread_x] += frag_a[thread_y] * frag_b[thread_x]; } } } __syncthreads(); } // store back to C #pragma unroll for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) { #pragma unroll for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) { C[OFFSET( BLOCK_SIZE_M * by + ty * THREAD_SIZE_Y + thread_y, BLOCK_SIZE_N * bx + tx * THREAD_SIZE_X + thread_x, N)] = accum[thread_y][thread_x]; } } } // TODO add shuffle to enable GPU write back col
0959a8c7bf3a5ee61f753de787cf88acb63ffdf6.cu
#include <stdio.h> #include <stdlib.h> // CUDA runtime #include <cuda_runtime.h> #include <cublas_v2.h> #include "dense_help_func.hpp" // cal offset from row col and ld , in row-major matrix, ld is the width of the matrix #define OFFSET(row, col, ld) ((row) * (ld) + (col)) // transfer float4 #define FETCH_FLOAT4(pointer) (reinterpret_cast<float4*>(&(pointer))[0]) template < const int BLOCK_SIZE_M, // width of block of C that each thread block calculate const int BLOCK_SIZE_K, // height of block of A that each thread block load into shared memory const int BLOCK_SIZE_N, // height of block of C that each thread block calculate const int THREAD_SIZE_Y, // height of block of C that each thread calculate const int THREAD_SIZE_X, // width of block of C that each thread calculate const bool ENABLE_DOUBLE_BUFFER // whether enable double buffering or not > __global__ void MatrixMulCUDAEncoding( float * __restrict__ A, float * __restrict__ B, float * __restrict__ C, const int K, const int N) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // size of thread block const int bszx = BLOCK_SIZE_N / THREAD_SIZE_X; const int bszy = BLOCK_SIZE_M / THREAD_SIZE_Y; const int THREAD_NUM_PER_BLOCK = bszy * bszx; // thread id const int tid = ty * bszx + tx; // shared memory __shared__ float As[BLOCK_SIZE_M][BLOCK_SIZE_K]; // avoid bank conflict __shared__ float Bs[BLOCK_SIZE_K][BLOCK_SIZE_N]; int ROW_PTR[7] = {1,4,3,5,7,0,2}; // add encoding // registers for C float accum[THREAD_SIZE_Y][THREAD_SIZE_X] = {0}; // registers for A and B float frag_a[THREAD_SIZE_Y]; float frag_b[THREAD_SIZE_X]; // threads needed to load one row of tile // / 4 is because float4 is used const int A_TILE_THREAD_PER_ROW = BLOCK_SIZE_K / 4; const int B_TILE_THREAD_PER_ROW = BLOCK_SIZE_N / 4; // row number and col number that needs to be loaded by this thread const int A_TILE_ROW_START = tid / A_TILE_THREAD_PER_ROW; const int B_TILE_ROW_START = tid / B_TILE_THREAD_PER_ROW; const int A_TILE_COL = tid % A_TILE_THREAD_PER_ROW * 4; const int B_TILE_COL = tid % B_TILE_THREAD_PER_ROW * 4; // row stride that thread uses to load multiple rows of a tile const int A_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / A_TILE_THREAD_PER_ROW; const int B_TILE_ROW_STRIDE = THREAD_NUM_PER_BLOCK / B_TILE_THREAD_PER_ROW; // can not unroll since K can not be determined at this point for (int tile_idx = 0 ; tile_idx < K ; tile_idx += BLOCK_SIZE_K) { // load A from global memory to shared memory #pragma unroll for ( int i = 0 ; i < BLOCK_SIZE_M ; i += A_TILE_ROW_STRIDE) { FETCH_FLOAT4(As[A_TILE_ROW_START + i][A_TILE_COL]) = FETCH_FLOAT4(A[OFFSET( BLOCK_SIZE_M * by + A_TILE_ROW_START + i, // row A_TILE_COL + tile_idx, // col K )]); } // load B from global memory to shared memory #pragma unroll for ( int i = 0 ; i < BLOCK_SIZE_K; i += B_TILE_ROW_STRIDE) { FETCH_FLOAT4(Bs[B_TILE_ROW_START + i][B_TILE_COL]) = FETCH_FLOAT4(B[OFFSET( tile_idx + B_TILE_ROW_START + i, // row B_TILE_COL + BLOCK_SIZE_N * bx, // col K )]); } __syncthreads(); // compute c #pragma unroll for (int k = 0; k < BLOCK_SIZE_K; ++ k) { // load A from shared memory to register //#pragma unroll //for(int i=0; i<4; i++){ // ROW_PTR[i] = i*BLOCK_SIZE_K+k; //} #pragma unroll for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) { // add one decoding overhead frag_a[thread_y] = reinterpret_cast<float*>(As)[ROW_PTR[((ty * THREAD_SIZE_Y + thread_y) * BLOCK_SIZE_K + k)%7]]; } // load B from shared memory to register #pragma unroll for (int 
thread_x = 0; thread_x < THREAD_SIZE_X; thread_x += 4) { FETCH_FLOAT4(frag_b[thread_x]) = FETCH_FLOAT4(Bs[k][THREAD_SIZE_X * tx + thread_x]); } #pragma unroll for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) { #pragma unroll for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) { accum[thread_y][thread_x] += frag_a[thread_y] * frag_b[thread_x]; } } } __syncthreads(); } // store back to C #pragma unroll for (int thread_y = 0; thread_y < THREAD_SIZE_Y; ++thread_y) { #pragma unroll for (int thread_x = 0; thread_x < THREAD_SIZE_X; ++thread_x) { C[OFFSET( BLOCK_SIZE_M * by + ty * THREAD_SIZE_Y + thread_y, BLOCK_SIZE_N * bx + tx * THREAD_SIZE_X + thread_x, N)] = accum[thread_y][thread_x]; } } } // TODO add shuffle to enable GPU write back col
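Both copies of this file only define the templated MatrixMulCUDAEncoding kernel; no instantiation or launch is shown. The sketch below is a hypothetical host-side launch whose block and grid shapes follow the kernel's own bszx/bszy derivation; the concrete tile sizes (128x8x128 blocks, 8x8 results per thread) are assumptions for illustration, not values taken from this repository, and M and N are assumed divisible by the tile sizes.

// Hypothetical launch of the templated kernel above; tile sizes are illustrative.
void launch_matmul_encoding(float* dA, float* dB, float* dC, int M, int N, int K) {
    constexpr int BLOCK_SIZE_M = 128;
    constexpr int BLOCK_SIZE_K = 8;
    constexpr int BLOCK_SIZE_N = 128;
    constexpr int THREAD_SIZE_Y = 8;
    constexpr int THREAD_SIZE_X = 8;

    // Matches the kernel's own derivation: bszx = BLOCK_SIZE_N / THREAD_SIZE_X,
    // bszy = BLOCK_SIZE_M / THREAD_SIZE_Y, one thread block per tile of C.
    dim3 block(BLOCK_SIZE_N / THREAD_SIZE_X, BLOCK_SIZE_M / THREAD_SIZE_Y);
    dim3 grid(N / BLOCK_SIZE_N, M / BLOCK_SIZE_M);  // assumes M, N divisible by the tile sizes

    MatrixMulCUDAEncoding<BLOCK_SIZE_M, BLOCK_SIZE_K, BLOCK_SIZE_N,
                          THREAD_SIZE_Y, THREAD_SIZE_X, false>
        <<<grid, block>>>(dA, dB, dC, K, N);
}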
445038c72a9f9fde1df72dac27c0b252fe6f29b2.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * Copyright 2019-2023, XGBoost contributors
 */
#include <memory>
#include <utility>

#include "ellpack_page.cuh"
#include "ellpack_page.h"  // for EllpackPage
#include "ellpack_page_source.h"

namespace xgboost::data {
void EllpackPageSource::Fetch() {
  dh::safe_cuda(hipSetDevice(device_));
  if (!this->ReadCache()) {
    if (count_ != 0 && !sync_) {
      // source is initialized to be the 0th page during construction, so when count_ is 0
      // there's no need to increment the source.
      ++(*source_);
    }
    // This is not read from cache so we still need it to be synced with sparse page source.
    CHECK_EQ(count_, source_->Iter());
    auto const &csr = source_->Page();
    this->page_.reset(new EllpackPage{});
    auto *impl = this->page_->Impl();
    *impl = EllpackPageImpl(device_, *cuts_, *csr, is_dense_, row_stride_, feature_types_);
    page_->SetBaseRowId(csr->base_rowid);
    this->WriteCache();
  }
}
}  // namespace xgboost::data
445038c72a9f9fde1df72dac27c0b252fe6f29b2.cu
/**
 * Copyright 2019-2023, XGBoost contributors
 */
#include <memory>
#include <utility>

#include "ellpack_page.cuh"
#include "ellpack_page.h"  // for EllpackPage
#include "ellpack_page_source.h"

namespace xgboost::data {
void EllpackPageSource::Fetch() {
  dh::safe_cuda(cudaSetDevice(device_));
  if (!this->ReadCache()) {
    if (count_ != 0 && !sync_) {
      // source is initialized to be the 0th page during construction, so when count_ is 0
      // there's no need to increment the source.
      ++(*source_);
    }
    // This is not read from cache so we still need it to be synced with sparse page source.
    CHECK_EQ(count_, source_->Iter());
    auto const &csr = source_->Page();
    this->page_.reset(new EllpackPage{});
    auto *impl = this->page_->Impl();
    *impl = EllpackPageImpl(device_, *cuts_, *csr, is_dense_, row_stride_, feature_types_);
    page_->SetBaseRowId(csr->base_rowid);
    this->WriteCache();
  }
}
}  // namespace xgboost::data
c4707d8a44755c93a44959fad51f3ac6de3dcbd0.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Includes, cuda */
#include <hip/hip_runtime.h>
#include <rocblas.h>

#include <iostream>

int main() {
    hipblasHandle_t handle;
    hipblasCreate(&handle);
    hipblasDestroy(handle);
    return 0;
}
c4707d8a44755c93a44959fad51f3ac6de3dcbd0.cu
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Includes, cuda */
#include <cuda_runtime.h>
#include <cublas_v2.h>

#include <iostream>

int main() {
    cublasHandle_t handle;
    cublasCreate(&handle);
    cublasDestroy(handle);
    return 0;
}
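The test above only creates and destroys a cuBLAS handle. As a hedged illustration of what the handle is for, the helper below issues a single SGEMM on column-major device buffers; the buffer names and the assumption that they are already allocated and filled elsewhere are illustrative.

// Illustrative only: a single C = alpha*A*B + beta*C call with the handle.
// d_A (m x k), d_B (k x n), d_C (m x n) are assumed to be column-major device buffers.
void sgemm_example(cublasHandle_t handle,
                   const float* d_A, const float* d_B, float* d_C,
                   int m, int n, int k) {
    const float alpha = 1.0f;
    const float beta  = 0.0f;
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                m, n, k,
                &alpha, d_A, m,   // lda = m
                        d_B, k,   // ldb = k
                &beta,  d_C, m);  // ldc = m
}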
a0c87ad835112714711da54ccce6fd7b78f71003.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void backward_avgpool_layer_kernel(int n, int w, int h, int c, float *in_delta, float *out_delta)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(id >= n) return;

    int k = id % c;
    id /= c;
    int b = id;

    int i;
    int out_index = (k + c*b);
    for(i = 0; i < w*h; ++i){
        int in_index = i + h*w*(k + b*c);
        in_delta[in_index] += out_delta[out_index] / (w*h);
    }
}
a0c87ad835112714711da54ccce6fd7b78f71003.cu
#include "includes.h" __global__ void backward_avgpool_layer_kernel(int n, int w, int h, int c, float *in_delta, float *out_delta) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id >= n) return; int k = id % c; id /= c; int b = id; int i; int out_index = (k + c*b); for(i = 0; i < w*h; ++i){ int in_index = i + h*w*(k + b*c); in_delta[in_index] += out_delta[out_index] / (w*h); } }
a9a8e8a7aa2fce1047b70f12f4923cce924c76b8.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/nvidia/fp16_emu.cuh>
#include <thrust/execution_policy.h>
#include <thrust/transform.h>

namespace faiss {
namespace gpu {

bool getDeviceSupportsFloat16Math(int device) {
    const auto& prop = getDeviceProperties(device);

    return (prop.major >= 6 || (prop.major == 5 && prop.minor >= 3));
}

__half hostFloat2Half(float a) {
#if TORCH_HIP_VERSION >= 9000
    __half_raw raw;
    raw.x = cpu_float2half_rn(a).x;
    return __half(raw);
#else
    __half h;
    h.x = cpu_float2half_rn(a).x;
    return h;
#endif
}

}
} // namespace
a9a8e8a7aa2fce1047b70f12f4923cce924c76b8.cu
/**
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <faiss/gpu/utils/Float16.cuh>
#include <faiss/gpu/utils/nvidia/fp16_emu.cuh>
#include <thrust/execution_policy.h>
#include <thrust/transform.h>

namespace faiss {
namespace gpu {

bool getDeviceSupportsFloat16Math(int device) {
    const auto& prop = getDeviceProperties(device);

    return (prop.major >= 6 || (prop.major == 5 && prop.minor >= 3));
}

__half hostFloat2Half(float a) {
#if CUDA_VERSION >= 9000
    __half_raw raw;
    raw.x = cpu_float2half_rn(a).x;
    return __half(raw);
#else
    __half h;
    h.x = cpu_float2half_rn(a).x;
    return h;
#endif
}

}
} // namespace
bbb7b3a391c2589ae869c0505a9b08def4f5c030.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* 2013 * Maciej Szeptuch * IIUWr */ #include <cstdlib> #include <cstdio> #include <GL/glew.h> #include <GL/glut.h> #include <cuda_gl_interop.h> #include <hiprand/hiprand_kernel.h> #define EPS 0.00001 #define cudaErr(ans) { cudaAssert((ans), __FILE__, __LINE__); } inline void cudaAssert(hipError_t code, char *file, int line) { if(code != hipSuccess) { fprintf(stderr,"%s:%d CUDA: %s(%d)\n", file, line, hipGetErrorString(code), code); exit(code); } } inline int divup(int a, int b) { return (a + b - 1) / b; } // GLUT and drawing stuff void menuDraw(void); void cudaDraw(void); void cudaInit(void); void glutDisplayCallback(void); void glutKeyboardCallback(unsigned char key, int, int); void glutReshapeCallback(int w, int h); void cleanup(void); __global__ void draw(int *picture, int width, int height, float scale, int steps, int posX, int posY, float *_matrix); __device__ __host__ void multiply(float &dx, float &dy, float sx, float sy, float *_matrix); int width = 800, height = 600, steps = 50, posX = 0, posY = 0; float scale = 10; float matrix[12] = { -0.40, 0.00, -1.00, 0.00, -0.40, 0.10, 0.76, -0.40, 0.00, 0.40, 0.76, 0.00, }; int *picture; GLuint data; int *cudaData; float *cudaMatrix; struct ActEdit { int m; int x; int y; } actEdit; dim3 blockSize(16,16); dim3 gridSize; int main(int argc, char *argv[]) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE); glutInitWindowSize(width, height); glutCreateWindow("CUDA GL IFS"); glutDisplayFunc(glutDisplayCallback); glutKeyboardFunc(glutKeyboardCallback); glutReshapeFunc(glutReshapeCallback); glewInit(); if(!glewIsSupported("GL_VERSION_2_1")) { fprintf(stderr, "OpenGL >= 2.1 required\n"); return 2; } cudaInit(); atexit(cleanup); glutMainLoop(); return 0; } void cleanup(void) { hipGLUnregisterBufferObject(data); glDeleteBuffers(1, &data); delete[] picture; hipFree(cudaMatrix); } void glutReshapeCallback(int w, int h) { width = w; height = h; cudaInit(); glViewport(0, 0, w, h); glLoadIdentity(); glOrtho(0.0, 1.0, 0.0, 1.0, 0.0, 1.0); } void glutKeyboardCallback(unsigned char key, int, int) { switch(key) { case '\e': case 'q': case 'Q': exit(3); break; case '\t': ++ actEdit.x; if(actEdit.x == 3) { ++ actEdit.y; actEdit.x = 0; } if(actEdit.y == 2) { ++ actEdit.m; actEdit.y = 0; } if(actEdit.m == 2) actEdit.m = 0; break; case '+': matrix[actEdit.m * 6 + actEdit.y * 3 + actEdit.x] += 0.01; cudaErr(hipMemcpy(cudaMatrix, matrix, 2 * 2 * 3 * sizeof(float), hipMemcpyHostToDevice)); break; case '-': matrix[actEdit.m * 6 + actEdit.y * 3 + actEdit.x] -= 0.01; cudaErr(hipMemcpy(cudaMatrix, matrix, 2 * 2 * 3 * sizeof(float), hipMemcpyHostToDevice)); break; case '[': scale += 0.1; break; case ']': scale -= 0.1; break; case ',': steps -= 1; break; case '.': steps += 1; break; case 'w': posY += 5; break; case 's': posY -= 5; break; case 'a': posX -= 5; break; case 'd': posX += 5; break; } menuDraw(); glutPostRedisplay(); } void glutDisplayCallback(void) { menuDraw(); cudaDraw(); hipDeviceSynchronize(); glClear(GL_COLOR_BUFFER_BIT); glDisable(GL_DEPTH_TEST); glRasterPos2i(0, 0); glDrawPixels(width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0); glutSwapBuffers(); glutReportErrors(); } void cudaInit(void) { if(data) { hipGLUnregisterBufferObject(data); glDeleteBuffers(1, &data); delete[] picture; hipFree(cudaMatrix); } glGenBuffers(1, &data); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, data); glBufferData(GL_PIXEL_UNPACK_BUFFER, width * height * sizeof(GLubyte) * 4, 0, 
GL_STREAM_DRAW); picture = new int[width * height]; memset(picture, 0, width * height * sizeof(int)); cudaErr(hipGLRegisterBufferObject(data)); gridSize = dim3(divup(width, blockSize.x), divup(height, blockSize.y)); cudaErr(hipMalloc(&cudaMatrix, 2 * 2 * 3 * sizeof(float))); cudaErr(hipMemcpy(cudaMatrix, matrix, 2 * 2 * 3 * sizeof(float), hipMemcpyHostToDevice)); } void cudaDraw(void) { hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start, NULL); cudaErr(hipGLMapBufferObject__((void **) &cudaData, data)); cudaErr(hipMemcpy(cudaData, picture, width * height * sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( draw), dim3(gridSize), dim3(blockSize), 0, 0, cudaData, width, height, scale, steps, posX, posY, cudaMatrix); cudaErr(hipPeekAtLastError()); cudaErr(hipDeviceSynchronize()); cudaErr(hipGLUnmapBufferObject(data)); hipEventRecord(end, NULL); hipEventSynchronize(end); float gputotal = 0; hipEventElapsedTime(&gputotal, start, end); printf("========== ][ Kernel took: %5.2f ][ ==========\n", gputotal); } __device__ __host__ void multiply(float &dx, float &dy, float sx, float sy, float *_matrix) { dx = sx * _matrix[0] + sy * _matrix[1] + _matrix[2]; dy = sx * _matrix[3] + sy * _matrix[4] + _matrix[5]; } __global__ void draw(int *picture, int width, int height, float scale, int steps, int posX, int posY, float *_matrix) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int id = y * width + x; hiprandState_t salt; // hiprand_init(1337, id, 0, &salt); - SLOW AS HELL hiprand_init((1337 << 20) + id, 0, 0, &salt); if(x >= width || y >= height) return; float px = x - width / 2, py = y - height / 2, lx = 0.0, ly = 0.0; for(int t = 0; t < 32; ++ t) multiply(px, py, px, py, _matrix + (hiprand(&salt) & 1) * 6); for(int t = 0; t < steps; ++ t) { multiply(px, py, px, py, _matrix + (hiprand(&salt) & 1) * 6); if(abs(px - lx) < EPS && abs(py - ly) < EPS) break; int _x = px / scale * width + width / 2 - posX; int _y = py / scale * height + height / 2 - posY; if(0 <= _x && _x < width && 0 <= _y && _y < height) picture[_y * width + _x] = 0xFFFFFF; lx = px; ly = py; } } void menuDraw(void) { system("clear"); puts("========== ][ CUDA IFS ][ =========="); printf("Resolution: %dx%d | Position (%d, %d)\n", width, height, posX, posY); printf("Scale: %4.1f | Steps: %3d\n", 10. / scale, steps); puts("Matrices: "); for(int m = 0; m < 2; ++ m) { puts(""); for(int y = 0; y < 2; ++ y) { printf("|"); for(int x = 0; x < 3; ++ x) { if(actEdit.m == m && actEdit.y == y && actEdit.x == x) printf("*%5.2f*", matrix[m * 6 + y * 3 + x]); else printf(" %5.2f ", matrix[m * 6 + y * 3 + x]); if(x == 1) printf("| |"); } puts("|"); } } puts(""); }
bbb7b3a391c2589ae869c0505a9b08def4f5c030.cu
/* 2013 * Maciej Szeptuch * IIUWr */ #include <cstdlib> #include <cstdio> #include <GL/glew.h> #include <GL/glut.h> #include <cuda_gl_interop.h> #include <curand_kernel.h> #define EPS 0.00001 #define cudaErr(ans) { cudaAssert((ans), __FILE__, __LINE__); } inline void cudaAssert(cudaError_t code, char *file, int line) { if(code != cudaSuccess) { fprintf(stderr,"%s:%d CUDA: %s(%d)\n", file, line, cudaGetErrorString(code), code); exit(code); } } inline int divup(int a, int b) { return (a + b - 1) / b; } // GLUT and drawing stuff void menuDraw(void); void cudaDraw(void); void cudaInit(void); void glutDisplayCallback(void); void glutKeyboardCallback(unsigned char key, int, int); void glutReshapeCallback(int w, int h); void cleanup(void); __global__ void draw(int *picture, int width, int height, float scale, int steps, int posX, int posY, float *_matrix); __device__ __host__ void multiply(float &dx, float &dy, float sx, float sy, float *_matrix); int width = 800, height = 600, steps = 50, posX = 0, posY = 0; float scale = 10; float matrix[12] = { -0.40, 0.00, -1.00, 0.00, -0.40, 0.10, 0.76, -0.40, 0.00, 0.40, 0.76, 0.00, }; int *picture; GLuint data; int *cudaData; float *cudaMatrix; struct ActEdit { int m; int x; int y; } actEdit; dim3 blockSize(16,16); dim3 gridSize; int main(int argc, char *argv[]) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE); glutInitWindowSize(width, height); glutCreateWindow("CUDA GL IFS"); glutDisplayFunc(glutDisplayCallback); glutKeyboardFunc(glutKeyboardCallback); glutReshapeFunc(glutReshapeCallback); glewInit(); if(!glewIsSupported("GL_VERSION_2_1")) { fprintf(stderr, "OpenGL >= 2.1 required\n"); return 2; } cudaInit(); atexit(cleanup); glutMainLoop(); return 0; } void cleanup(void) { cudaGLUnregisterBufferObject(data); glDeleteBuffers(1, &data); delete[] picture; cudaFree(cudaMatrix); } void glutReshapeCallback(int w, int h) { width = w; height = h; cudaInit(); glViewport(0, 0, w, h); glLoadIdentity(); glOrtho(0.0, 1.0, 0.0, 1.0, 0.0, 1.0); } void glutKeyboardCallback(unsigned char key, int, int) { switch(key) { case '\e': case 'q': case 'Q': exit(3); break; case '\t': ++ actEdit.x; if(actEdit.x == 3) { ++ actEdit.y; actEdit.x = 0; } if(actEdit.y == 2) { ++ actEdit.m; actEdit.y = 0; } if(actEdit.m == 2) actEdit.m = 0; break; case '+': matrix[actEdit.m * 6 + actEdit.y * 3 + actEdit.x] += 0.01; cudaErr(cudaMemcpy(cudaMatrix, matrix, 2 * 2 * 3 * sizeof(float), cudaMemcpyHostToDevice)); break; case '-': matrix[actEdit.m * 6 + actEdit.y * 3 + actEdit.x] -= 0.01; cudaErr(cudaMemcpy(cudaMatrix, matrix, 2 * 2 * 3 * sizeof(float), cudaMemcpyHostToDevice)); break; case '[': scale += 0.1; break; case ']': scale -= 0.1; break; case ',': steps -= 1; break; case '.': steps += 1; break; case 'w': posY += 5; break; case 's': posY -= 5; break; case 'a': posX -= 5; break; case 'd': posX += 5; break; } menuDraw(); glutPostRedisplay(); } void glutDisplayCallback(void) { menuDraw(); cudaDraw(); cudaThreadSynchronize(); glClear(GL_COLOR_BUFFER_BIT); glDisable(GL_DEPTH_TEST); glRasterPos2i(0, 0); glDrawPixels(width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0); glutSwapBuffers(); glutReportErrors(); } void cudaInit(void) { if(data) { cudaGLUnregisterBufferObject(data); glDeleteBuffers(1, &data); delete[] picture; cudaFree(cudaMatrix); } glGenBuffers(1, &data); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, data); glBufferData(GL_PIXEL_UNPACK_BUFFER, width * height * sizeof(GLubyte) * 4, 0, GL_STREAM_DRAW); picture = new int[width * height]; memset(picture, 0, width * height * 
sizeof(int)); cudaErr(cudaGLRegisterBufferObject(data)); gridSize = dim3(divup(width, blockSize.x), divup(height, blockSize.y)); cudaErr(cudaMalloc(&cudaMatrix, 2 * 2 * 3 * sizeof(float))); cudaErr(cudaMemcpy(cudaMatrix, matrix, 2 * 2 * 3 * sizeof(float), cudaMemcpyHostToDevice)); } void cudaDraw(void) { cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start, NULL); cudaErr(cudaGLMapBufferObject((void **) &cudaData, data)); cudaErr(cudaMemcpy(cudaData, picture, width * height * sizeof(int), cudaMemcpyHostToDevice)); draw<<<gridSize, blockSize>>>(cudaData, width, height, scale, steps, posX, posY, cudaMatrix); cudaErr(cudaPeekAtLastError()); cudaErr(cudaDeviceSynchronize()); cudaErr(cudaGLUnmapBufferObject(data)); cudaEventRecord(end, NULL); cudaEventSynchronize(end); float gputotal = 0; cudaEventElapsedTime(&gputotal, start, end); printf("========== ][ Kernel took: %5.2f ][ ==========\n", gputotal); } __device__ __host__ void multiply(float &dx, float &dy, float sx, float sy, float *_matrix) { dx = sx * _matrix[0] + sy * _matrix[1] + _matrix[2]; dy = sx * _matrix[3] + sy * _matrix[4] + _matrix[5]; } __global__ void draw(int *picture, int width, int height, float scale, int steps, int posX, int posY, float *_matrix) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int id = y * width + x; curandState salt; // curand_init(1337, id, 0, &salt); - SLOW AS HELL curand_init((1337 << 20) + id, 0, 0, &salt); if(x >= width || y >= height) return; float px = x - width / 2, py = y - height / 2, lx = 0.0, ly = 0.0; for(int t = 0; t < 32; ++ t) multiply(px, py, px, py, _matrix + (curand(&salt) & 1) * 6); for(int t = 0; t < steps; ++ t) { multiply(px, py, px, py, _matrix + (curand(&salt) & 1) * 6); if(abs(px - lx) < EPS && abs(py - ly) < EPS) break; int _x = px / scale * width + width / 2 - posX; int _y = py / scale * height + height / 2 - posY; if(0 <= _x && _x < width && 0 <= _y && _y < height) picture[_y * width + _x] = 0xFFFFFF; lx = px; ly = py; } } void menuDraw(void) { system("clear"); puts("========== ][ CUDA IFS ][ =========="); printf("Resolution: %dx%d | Position (%d, %d)\n", width, height, posX, posY); printf("Scale: %4.1f | Steps: %3d\n", 10. / scale, steps); puts("Matrices: "); for(int m = 0; m < 2; ++ m) { puts(""); for(int y = 0; y < 2; ++ y) { printf("|"); for(int x = 0; x < 3; ++ x) { if(actEdit.m == m && actEdit.y == y && actEdit.x == x) printf("*%5.2f*", matrix[m * 6 + y * 3 + x]); else printf(" %5.2f ", matrix[m * 6 + y * 3 + x]); if(x == 1) printf("| |"); } puts("|"); } } puts(""); }
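The draw kernel above plays the chaos game on the GPU: it repeatedly applies one of the two affine maps, chosen at random, and plots the resulting point. A minimal host-side version of the same loop can be handy for checking the affine maps independently of the GL/CUDA path; the sketch below reuses the multiply, matrix, and scale globals from this file, with rand() standing in for curand and an arbitrary iteration count.

// Host-side chaos-game cross-check (illustrative, not part of the original file).
// `out` is a width*height accumulation buffer provided by the caller.
void ifsReferenceCpu(float *out, int width, int height, int iterations)
{
    float px = 0.f, py = 0.f;
    for(int t = 0; t < iterations; ++t)
    {
        // Pick one of the two affine maps at random, as the kernel does with curand.
        multiply(px, py, px, py, matrix + (rand() & 1) * 6);
        int x = (int)(px / scale * width  + width  / 2);
        int y = (int)(py / scale * height + height / 2);
        if(0 <= x && x < width && 0 <= y && y < height)
            out[y * width + x] += 1.f;
    }
}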
8c271e1e42f1cd61ad43b4979b5e3eb8eea688fd.hip
// !!! This is a file automatically generated by hipify!!! #include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> #include<opencv2/opencv.hpp> #include <iostream> #include <hip/hip_runtime.h> #include "device_launch_parameters.h" #include "GpuTimer.h" #include<time.h> using namespace cv; using namespace std; #define BLUR_SIZE 3 #define BlockSize 16 /* ******************************************************************************************************************************************************* | The below code was implemented using CUDA 10.0 with OpenCV 4.0.1. The minor change is the flag used in IMREAD() that was changed to IMREAD_GRAYSCALE| | ******************************************************************************************************************************************************* */ // Serial implementation for running on CPU using a single thread. void ImageBlurCpu(unsigned char* blurImg, unsigned char* InputImg,int width, int height) { int sum, pixelnum; int lastRow, lastCol; int rowfilter, colfilter; int blurSize = 2 * BLUR_SIZE + 1; for (int row = 0; row < height; row++) { for (int col = 0; col < width; col++) { lastRow = row - BLUR_SIZE; lastCol = col - BLUR_SIZE; pixelnum = 0; sum = 0; for (int i = 0; i < blurSize; i++) { for (int j = 0; j < blurSize; j++) { rowfilter = lastRow + i; colfilter = lastCol + j; if ((rowfilter >= 0) && (rowfilter <= height) && (colfilter >= 0) && (colfilter <= width)) { sum += InputImg[rowfilter*width + colfilter]; pixelnum++; } } } blurImg[row*width + col] = (unsigned char)(sum/pixelnum); } } } // The input image is grayscale and is encoded as unsigned characters [0, 255] __global__ void ImageBlur(unsigned char *out, unsigned char *in, int width, int height) { int rowfilter, colfilter; int lastRow, lastCol; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int blurSize = 2 * BLUR_SIZE +1; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; if ((row < height) && (col < width)) { int pixelnum = 0; int sum = 0; lastRow = row - BLUR_SIZE; lastCol = col - BLUR_SIZE; for (int i = 0; i < blurSize; i++) { for (int j = 0; j < blurSize; j++) { rowfilter = lastRow + i; colfilter = lastCol + j; if ((rowfilter >= 0) && (rowfilter <= height) && (colfilter >= 0) && (colfilter <= width)) { sum += in[rowfilter*width + colfilter]; pixelnum++; } } } out[row*width + col] = (unsigned char)(sum / pixelnum); } } int main(void) { //Read the image using OpenCV Mat image; //Create matrix to read iamge image= imread("Tiger.jpg", IMREAD_GRAYSCALE); if (image.empty()) { printf("Cannot read image file %s", "Tiger.jpg"); exit(1); } int imageWidth=image.cols; int imageHeight=image.rows; //Allocate the host image vectors unsigned char *h_OrigImage; unsigned char *h_BlurImage= (unsigned char *)malloc(sizeof(unsigned char)*imageWidth*imageHeight); unsigned char *h_BlurImage_CPU= (unsigned char *)malloc(sizeof(unsigned char)*imageWidth*imageHeight); h_OrigImage = image.data; //The data member of a Mat object returns the pointer to the first row, first column of the image. 
//try image.ptr() hipError_t err = hipSuccess; //Allocate memory on the device for the original image and the blurred image and record the needed time unsigned char *d_OrigImage, *d_BlurImage; GpuTimer timer; timer.Start(); //@@ Insert Your code Here to allocate memory on the device for original and blurred images err = hipMalloc((void **)&d_OrigImage, sizeof(unsigned char)*imageWidth*imageHeight); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device Original Image Vector (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMalloc((void **)&d_BlurImage, sizeof(unsigned char)*imageWidth*imageHeight); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device Blurred Image Vector (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } timer.Stop(); printf("Time to allocate memory on the device is: %f msecs.\n", timer.Elapsed()); //Copy the original image from the host to the device and record the needed time GpuTimer timer1; timer1.Start(); //@@ Insert your code here to Copy the original image from the host to the device hipMemcpy(d_OrigImage, h_OrigImage, sizeof(unsigned char)*imageWidth*imageHeight, hipMemcpyHostToDevice); timer1.Stop(); printf("Time to copy the Original image from the host to the device is: %f msecs.\n", timer1.Elapsed()); //Do the Processing on the GPU //Kernel Execution Configuration Parameters dim3 dimBlock(16, 16, 1); //@@ Insert Your code Here for grid dimensions dim3 dimGrid(ceil(imageWidth / 16.0), ceil(imageHeight / 16.0), 1); //Invoke the ImageBlur kernel and record the needed time for its execution //GpuTimer timer; GpuTimer timer2; timer2.Start(); //@@ Insert your code here for kernel invocation hipLaunchKernelGGL(( ImageBlur), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_BlurImage, d_OrigImage, imageWidth, imageHeight); timer2.Stop(); printf("Implemented ImageBlur Kernel ran in: %f msecs.\n", timer2.Elapsed()); //Copy resulting blurred image from device to host and record the needed time GpuTimer timer3; timer3.Start(); //@@ Insert your code here to Copy resulting blurred image from device to host hipMemcpy(h_BlurImage, d_BlurImage, sizeof(unsigned char)*imageWidth*imageHeight, hipMemcpyDeviceToHost); timer3.Stop(); printf("Time to copy the blurred image from the device to the host is: %f msecs.\n", timer3.Elapsed()); //Do the Processing on the CPU clock_t begin = clock(); //@@ Insert your code her to call the cpu function for ImageBlur on the CPU ImageBlurCpu(h_BlurImage_CPU,h_OrigImage, imageWidth, imageHeight); clock_t end = clock(); double time_spent = (double)(end - begin) / CLOCKS_PER_SEC*1000; printf("Implemented CPU code ran in: %f msecs.\n", time_spent); //Postprocess and Display the resulting images using OpenCV Mat Image1(imageHeight, imageWidth,CV_8UC1,h_BlurImage); //grayscale image mat object Mat Image2(imageHeight,imageWidth,CV_8UC1,h_BlurImage_CPU ); //grayscale image mat object namedWindow("CPUImage", WINDOW_NORMAL); //Create window to display the image namedWindow("GPUImage", WINDOW_NORMAL); namedWindow("OriginalImage", WINDOW_NORMAL); imshow("GPUImage",Image1); imshow("CPUImage",Image2); //Display the image in the window imshow("OriginalImage", image); //Display the original image in the window waitKey(0); //Wait till you press a key //Free host memory image.release(); Image1.release(); Image2.release(); free(h_BlurImage); free(h_BlurImage_CPU); //Free device memory //@@ Insert your code here to free device memory hipFree(d_OrigImage); hipFree(d_BlurImage); return 0; }
8c271e1e42f1cd61ad43b4979b5e3eb8eea688fd.cu
#include <opencv2/core/core.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> #include<opencv2/opencv.hpp> #include <iostream> #include <cuda_runtime.h> #include "device_launch_parameters.h" #include "GpuTimer.h" #include<time.h> using namespace cv; using namespace std; #define BLUR_SIZE 3 #define BlockSize 16 /* ******************************************************************************************************************************************************* | The below code was implemented using CUDA 10.0 with OpenCV 4.0.1. The minor change is the flag used in IMREAD() that was changed to IMREAD_GRAYSCALE| | ******************************************************************************************************************************************************* */ // Serial implementation for running on CPU using a single thread. void ImageBlurCpu(unsigned char* blurImg, unsigned char* InputImg,int width, int height) { int sum, pixelnum; int lastRow, lastCol; int rowfilter, colfilter; int blurSize = 2 * BLUR_SIZE + 1; for (int row = 0; row < height; row++) { for (int col = 0; col < width; col++) { lastRow = row - BLUR_SIZE; lastCol = col - BLUR_SIZE; pixelnum = 0; sum = 0; for (int i = 0; i < blurSize; i++) { for (int j = 0; j < blurSize; j++) { rowfilter = lastRow + i; colfilter = lastCol + j; if ((rowfilter >= 0) && (rowfilter <= height) && (colfilter >= 0) && (colfilter <= width)) { sum += InputImg[rowfilter*width + colfilter]; pixelnum++; } } } blurImg[row*width + col] = (unsigned char)(sum/pixelnum); } } } // The input image is grayscale and is encoded as unsigned characters [0, 255] __global__ void ImageBlur(unsigned char *out, unsigned char *in, int width, int height) { int rowfilter, colfilter; int lastRow, lastCol; int tx = threadIdx.x; int ty = threadIdx.y; int bx = blockIdx.x; int by = blockIdx.y; int blurSize = 2 * BLUR_SIZE +1; int row = by * blockDim.y + ty; int col = bx * blockDim.x + tx; if ((row < height) && (col < width)) { int pixelnum = 0; int sum = 0; lastRow = row - BLUR_SIZE; lastCol = col - BLUR_SIZE; for (int i = 0; i < blurSize; i++) { for (int j = 0; j < blurSize; j++) { rowfilter = lastRow + i; colfilter = lastCol + j; if ((rowfilter >= 0) && (rowfilter <= height) && (colfilter >= 0) && (colfilter <= width)) { sum += in[rowfilter*width + colfilter]; pixelnum++; } } } out[row*width + col] = (unsigned char)(sum / pixelnum); } } int main(void) { //Read the image using OpenCV Mat image; //Create matrix to read iamge image= imread("Tiger.jpg", IMREAD_GRAYSCALE); if (image.empty()) { printf("Cannot read image file %s", "Tiger.jpg"); exit(1); } int imageWidth=image.cols; int imageHeight=image.rows; //Allocate the host image vectors unsigned char *h_OrigImage; unsigned char *h_BlurImage= (unsigned char *)malloc(sizeof(unsigned char)*imageWidth*imageHeight); unsigned char *h_BlurImage_CPU= (unsigned char *)malloc(sizeof(unsigned char)*imageWidth*imageHeight); h_OrigImage = image.data; //The data member of a Mat object returns the pointer to the first row, first column of the image. 
//try image.ptr() cudaError_t err = cudaSuccess; //Allocate memory on the device for the original image and the blurred image and record the needed time unsigned char *d_OrigImage, *d_BlurImage; GpuTimer timer; timer.Start(); //@@ Insert Your code Here to allocate memory on the device for original and blurred images err = cudaMalloc((void **)&d_OrigImage, sizeof(unsigned char)*imageWidth*imageHeight); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device Original Image Vector (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMalloc((void **)&d_BlurImage, sizeof(unsigned char)*imageWidth*imageHeight); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device Blurred Image Vector (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } timer.Stop(); printf("Time to allocate memory on the device is: %f msecs.\n", timer.Elapsed()); //Copy the original image from the host to the device and record the needed time GpuTimer timer1; timer1.Start(); //@@ Insert your code here to Copy the original image from the host to the device cudaMemcpy(d_OrigImage, h_OrigImage, sizeof(unsigned char)*imageWidth*imageHeight, cudaMemcpyHostToDevice); timer1.Stop(); printf("Time to copy the Original image from the host to the device is: %f msecs.\n", timer1.Elapsed()); //Do the Processing on the GPU //Kernel Execution Configuration Parameters dim3 dimBlock(16, 16, 1); //@@ Insert Your code Here for grid dimensions dim3 dimGrid(ceil(imageWidth / 16.0), ceil(imageHeight / 16.0), 1); //Invoke the ImageBlur kernel and record the needed time for its execution //GpuTimer timer; GpuTimer timer2; timer2.Start(); //@@ Insert your code here for kernel invocation ImageBlur<<< dimGrid, dimBlock >>>(d_BlurImage, d_OrigImage, imageWidth, imageHeight); timer2.Stop(); printf("Implemented ImageBlur Kernel ran in: %f msecs.\n", timer2.Elapsed()); //Copy resulting blurred image from device to host and record the needed time GpuTimer timer3; timer3.Start(); //@@ Insert your code here to Copy resulting blurred image from device to host cudaMemcpy(h_BlurImage, d_BlurImage, sizeof(unsigned char)*imageWidth*imageHeight, cudaMemcpyDeviceToHost); timer3.Stop(); printf("Time to copy the blurred image from the device to the host is: %f msecs.\n", timer3.Elapsed()); //Do the Processing on the CPU clock_t begin = clock(); //@@ Insert your code her to call the cpu function for ImageBlur on the CPU ImageBlurCpu(h_BlurImage_CPU,h_OrigImage, imageWidth, imageHeight); clock_t end = clock(); double time_spent = (double)(end - begin) / CLOCKS_PER_SEC*1000; printf("Implemented CPU code ran in: %f msecs.\n", time_spent); //Postprocess and Display the resulting images using OpenCV Mat Image1(imageHeight, imageWidth,CV_8UC1,h_BlurImage); //grayscale image mat object Mat Image2(imageHeight,imageWidth,CV_8UC1,h_BlurImage_CPU ); //grayscale image mat object namedWindow("CPUImage", WINDOW_NORMAL); //Create window to display the image namedWindow("GPUImage", WINDOW_NORMAL); namedWindow("OriginalImage", WINDOW_NORMAL); imshow("GPUImage",Image1); imshow("CPUImage",Image2); //Display the image in the window imshow("OriginalImage", image); //Display the original image in the window waitKey(0); //Wait till you press a key //Free host memory image.release(); Image1.release(); Image2.release(); free(h_BlurImage); free(h_BlurImage_CPU); //Free device memory //@@ Insert your code here to free device memory cudaFree(d_OrigImage); cudaFree(d_BlurImage); return 0; }
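Both copies of this homework rely on a GpuTimer helper whose header is not part of this dump. The struct below is a hedged guess at the usual cudaEvent-based pattern such a helper wraps (the IFS file above uses the same events directly); it is not the actual GpuTimer.h.

// Hedged guess at what a GpuTimer-style helper wraps: plain CUDA events.
#include <cuda_runtime.h>

struct EventTimer {
    cudaEvent_t startEv, stopEv;
    EventTimer()  { cudaEventCreate(&startEv); cudaEventCreate(&stopEv); }
    ~EventTimer() { cudaEventDestroy(startEv); cudaEventDestroy(stopEv); }
    void Start()  { cudaEventRecord(startEv, 0); }
    void Stop()   { cudaEventRecord(stopEv, 0); }
    float Elapsed() {                    // milliseconds
        float ms = 0.f;
        cudaEventSynchronize(stopEv);    // wait for the stop event to complete
        cudaEventElapsedTime(&ms, startEv, stopEv);
        return ms;
    }
};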
5b89d3db0db2008b4a267ffc1b4d49630e4ce6e6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>

__global__ void add2(int *a)
{
    *a = *a + 2;
}

int main( void )
{
    int *data;
    hipMallocManaged(&data, sizeof(int));
    *data = 5;

    hipLaunchKernelGGL(( add2), dim3(1),dim3(1), 0, 0, data);
    hipDeviceSynchronize();

    printf("data: %d\n", *data);
    hipFree(data);
    return 0;
}
5b89d3db0db2008b4a267ffc1b4d49630e4ce6e6.cu
#include<stdio.h>

__global__ void add2(int *a)
{
    *a = *a + 2;
}

int main( void )
{
    int *data;
    cudaMallocManaged(&data, sizeof(int));
    *data = 5;

    add2<<<1,1>>>(data);
    cudaDeviceSynchronize();

    printf("data: %d\n", *data);
    cudaFree(data);
    return 0;
}
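The managed-memory sample above relies on on-demand page migration and does no error checking. The variant below is a hedged elaboration that adds a simple CHECK macro and an explicit cudaMemPrefetchAsync so the allocation is resident on the GPU before the launch; the macro name and the choice of device 0 are illustrative additions, not part of the original.

// Illustrative variant of the sample above with error checking and a prefetch.
#include <stdio.h>
#include <cuda_runtime.h>

#define CHECK(call) do { \
    cudaError_t e = (call); \
    if (e != cudaSuccess) { \
        fprintf(stderr, "%s:%d %s\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
        return 1; \
    } } while (0)

__global__ void add2(int *a) { *a = *a + 2; }

int main(void)
{
    int *data;
    CHECK(cudaMallocManaged(&data, sizeof(int)));
    *data = 5;
    CHECK(cudaMemPrefetchAsync(data, sizeof(int), 0, 0));  // migrate to device 0 up front
    add2<<<1, 1>>>(data);
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    printf("data: %d\n", *data);
    CHECK(cudaFree(data));
    return 0;
}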
23e01eec3380f3eb910cdefcc8bd193accd77c1a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "updateState.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *B = NULL; hipMalloc(&B, XSIZE*YSIZE); float *external = NULL; hipMalloc(&external, XSIZE*YSIZE); int dim = 2; float timestep = 1; float noise = 1; int length = 1; int totalIterations = 1; int iterationNum = 1; float L = 1; float M = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( updateState), dim3(gridBlock),dim3(threadBlock), 0, 0, B,external,dim,timestep,noise,length,totalIterations,iterationNum,L,M); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( updateState), dim3(gridBlock),dim3(threadBlock), 0, 0, B,external,dim,timestep,noise,length,totalIterations,iterationNum,L,M); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( updateState), dim3(gridBlock),dim3(threadBlock), 0, 0, B,external,dim,timestep,noise,length,totalIterations,iterationNum,L,M); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
23e01eec3380f3eb910cdefcc8bd193accd77c1a.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "updateState.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *B = NULL; cudaMalloc(&B, XSIZE*YSIZE); float *external = NULL; cudaMalloc(&external, XSIZE*YSIZE); int dim = 2; float timestep = 1; float noise = 1; int length = 1; int totalIterations = 1; int iterationNum = 1; float L = 1; float M = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); updateState<<<gridBlock,threadBlock>>>(B,external,dim,timestep,noise,length,totalIterations,iterationNum,L,M); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { updateState<<<gridBlock,threadBlock>>>(B,external,dim,timestep,noise,length,totalIterations,iterationNum,L,M); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { updateState<<<gridBlock,threadBlock>>>(B,external,dim,timestep,noise,length,totalIterations,iterationNum,L,M); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
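Kernel launches are asynchronous, so the steady_clock interval in the harness above can under-measure execution time: without a synchronize before the end timestamp it may mostly capture enqueue overhead. The helper below is a hedged alternative that wraps the launch in a lambda and synchronizes before reading the clock; it is a suggested adjustment, not part of the original benchmark.

// Hedged timing helper: returns average microseconds per launch, including execution.
#include <chrono>
#include <cuda_runtime.h>

template <typename Launch>
float timeLaunchesUs(Launch launch, int iters) {
    using namespace std::chrono;
    launch();                          // warm-up
    cudaDeviceSynchronize();
    auto start = steady_clock::now();
    for (int i = 0; i < iters; ++i) launch();
    cudaDeviceSynchronize();           // wait for all queued launches to finish
    auto end = steady_clock::now();
    return duration_cast<duration<float, std::micro>>(end - start).count() / iters;
}

// Usage inside the block_looper loop above (illustrative):
// float us = timeLaunchesUs([&]{
//     updateState<<<gridBlock,threadBlock>>>(B,external,dim,timestep,noise,length,
//                                            totalIterations,iterationNum,L,M);
// }, 1000);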
31e842f80c618ba0636797360fc73a5cced0bc3f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright: See LICENSE file that comes with this distribution
 *
 */

#include "cupp/deviceT/vector.h"

#include "kernel_t.h"

__global__ void global_function (cupp::deviceT::vector<int> &i) {
	i[threadIdx.x] *= 2;
}

kernelT get_kernel() {
	return (kernelT)global_function;
}
31e842f80c618ba0636797360fc73a5cced0bc3f.cu
/*
 * Copyright: See LICENSE file that comes with this distribution
 *
 */

#include "cupp/deviceT/vector.h"

#include "kernel_t.h"

__global__ void global_function (cupp::deviceT::vector<int> &i) {
	i[threadIdx.x] *= 2;
}

kernelT get_kernel() {
	return (kernelT)global_function;
}
31a095a33796a8b3010710083b90724af09aad6a.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernUpSweepIter(int nPadded, int depth, int* dataPadded) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= nPadded) { return; } int offset = 1 << (depth + 1); if (index % offset == 0) { dataPadded[index + offset - 1] += dataPadded[index + (offset >> 1) - 1]; } } __global__ void kernDownSweepIter(int nPadded, int depth, int* dataPadded) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= nPadded) { return; } int offset = 1 << (depth + 1); if (index % offset == 0) { int temp = dataPadded[index + (offset >> 1) - 1]; dataPadded[index + (offset >> 1) - 1] = dataPadded[index + offset - 1]; dataPadded[index + offset - 1] += temp; } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { if (n < 1) { return; } // allocate a buffer padded to a power of 2. int depth = ilog2ceil(n); int nPadded = 1 << depth; int* dev_dataPadded; hipMalloc((void**)&dev_dataPadded, nPadded * sizeof(int)); checkCUDAError("hipMalloc dev_dataPadded failed!"); // set blocks and threads dim3 threadsPerBlock(blockSize); dim3 fullBlocksPerGrid(::ceil((double)nPadded / blockSize)); // copy idata to device memory hipMemset(dev_dataPadded, 0, nPadded * sizeof(int)); checkCUDAError("hipMemset dev_dataPadded failed!"); hipMemcpy(dev_dataPadded, idata, n * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("hipMemcpy dev_dataPadded failed!"); timer().startGpuTimer(); // TODO // perform upsweep on idata for (int i = 0; i < depth; i++) { hipLaunchKernelGGL(( kernUpSweepIter), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, nPadded, i, dev_dataPadded); checkCUDAError("kernUpSweepIter failed!"); } // perform downsweep on idata hipMemset(dev_dataPadded + nPadded - 1, 0, sizeof(int)); checkCUDAError("hipMemset dev_dataPadded + nPadded - 1 failed!"); for (int i = depth - 1; i >= 0; i--) { hipLaunchKernelGGL(( kernDownSweepIter), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, nPadded, i, dev_dataPadded); checkCUDAError("kernDownSweepIter failed!"); } hipDeviceSynchronize(); checkCUDAError("hipDeviceSynchronize failed!"); timer().endGpuTimer(); // copy scan back to host hipMemcpy(odata, dev_dataPadded, n * sizeof(int), hipMemcpyDeviceToHost); checkCUDAError("hipMemcpy dev_dataPadded failed!"); hipFree(dev_dataPadded); checkCUDAError("hipFree dev_dataPadded failed!"); } __global__ void kernScatter(int nPadded, const int* idata, int* odata, const int* dataPadded) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= nPadded) { return; } if (idata[index]) { odata[dataPadded[index]] = idata[index]; } } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. */ int compact(int n, int *odata, const int *idata) { // TODO if (n < 1) { return -1; } // allocate a buffer padded to a power of 2. int depth = ilog2ceil(n); int nPadded = 1 << depth; // calling kernels means we cannot directly index into idata. 
Need to have a device copy int* dev_dataPadded; hipMalloc((void**)&dev_dataPadded, n * sizeof(int)); checkCUDAError("hipMalloc dev_dataPadded failed!"); hipMemset(dev_dataPadded, 0, n * sizeof(int)); checkCUDAError("hipMemset dev_dataPadded failed!"); hipMemcpy(dev_dataPadded, idata, n * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("hipMemcpy dev_dataPadded failed!"); // mapping of true and false for idata int* dev_bools; hipMalloc((void**)&dev_bools, n * sizeof(int)); checkCUDAError("hipMalloc dev_bools failed!"); // array that will be scanned into int* dev_index; hipMalloc((void**)&dev_index, nPadded * sizeof(int)); checkCUDAError("hipMalloc dev_index failed!"); hipMemset(dev_index, 0, nPadded * sizeof(int)); checkCUDAError("hipMemset dev_index failed!"); int* dev_odata; hipMalloc((void**)&dev_odata, n * sizeof(int)); checkCUDAError("hipMalloc dev_odata failed!"); // set blocks and threads dim3 threadsPerBlock(blockSize); dim3 fullBlocksPerGrid(::ceil((double) nPadded / blockSize)); timer().startGpuTimer(); // SCAN StreamCompaction::Common::kernMapToBoolean << <fullBlocksPerGrid, threadsPerBlock >> > (n, dev_bools, dev_index, dev_dataPadded); checkCUDAError("kernMapToBoolean failed!"); // perform upsweep on idata for (int i = 0; i < depth; i++) { kernUpSweepIter << <fullBlocksPerGrid, threadsPerBlock >> > (nPadded, i, dev_index); checkCUDAError("kernUpSweepIter failed!"); } // perform downsweep on idata hipMemset(dev_index + nPadded - 1, 0, sizeof(int)); checkCUDAError("hipMemset dev_dataPadded + nPadded - 1 failed!"); for (int i = depth - 1; i >= 0; i--) { hipLaunchKernelGGL(( kernDownSweepIter), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, nPadded, i, dev_index); checkCUDAError("kernDownSweepIter failed!"); } // SCATTER StreamCompaction::Common::kernScatter << <fullBlocksPerGrid, threadsPerBlock >> > (n, dev_odata, dev_dataPadded, dev_bools, dev_index); checkCUDAError("kernScatter failed!"); hipDeviceSynchronize(); checkCUDAError("hipDeviceSynchronize failed!"); timer().endGpuTimer(); // return compact to odata hipMemcpy(odata, dev_odata, n * sizeof(int), hipMemcpyDeviceToHost); checkCUDAError("hipMemcpy dev_bools failed!"); // return final index and bool to host to calculate number of elements int idx, val; hipMemcpy((void*)&idx, dev_index + n - 1, sizeof(int), hipMemcpyDeviceToHost); checkCUDAError("hipMemcpy idx failed!"); hipMemcpy((void*)&val, dev_bools + n - 1, sizeof(int), hipMemcpyDeviceToHost); checkCUDAError("hipMemcpy val failed!"); // free hipFree(dev_dataPadded); checkCUDAError("hipFree dev_dataPadded failed!"); hipFree(dev_bools); checkCUDAError("hipFree dev_bools failed!"); hipFree(dev_index); checkCUDAError("hipFree dev_index failed!"); hipFree(dev_odata); checkCUDAError("hipFree dev_odata failed!"); return idx + val; } } }
31a095a33796a8b3010710083b90724af09aad6a.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernUpSweepIter(int nPadded, int depth, int* dataPadded) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= nPadded) { return; } int offset = 1 << (depth + 1); if (index % offset == 0) { dataPadded[index + offset - 1] += dataPadded[index + (offset >> 1) - 1]; } } __global__ void kernDownSweepIter(int nPadded, int depth, int* dataPadded) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= nPadded) { return; } int offset = 1 << (depth + 1); if (index % offset == 0) { int temp = dataPadded[index + (offset >> 1) - 1]; dataPadded[index + (offset >> 1) - 1] = dataPadded[index + offset - 1]; dataPadded[index + offset - 1] += temp; } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { if (n < 1) { return; } // allocate a buffer padded to a power of 2. int depth = ilog2ceil(n); int nPadded = 1 << depth; int* dev_dataPadded; cudaMalloc((void**)&dev_dataPadded, nPadded * sizeof(int)); checkCUDAError("cudaMalloc dev_dataPadded failed!"); // set blocks and threads dim3 threadsPerBlock(blockSize); dim3 fullBlocksPerGrid(std::ceil((double)nPadded / blockSize)); // copy idata to device memory cudaMemset(dev_dataPadded, 0, nPadded * sizeof(int)); checkCUDAError("cudaMemset dev_dataPadded failed!"); cudaMemcpy(dev_dataPadded, idata, n * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("cudaMemcpy dev_dataPadded failed!"); timer().startGpuTimer(); // TODO // perform upsweep on idata for (int i = 0; i < depth; i++) { kernUpSweepIter<<<fullBlocksPerGrid, threadsPerBlock>>>(nPadded, i, dev_dataPadded); checkCUDAError("kernUpSweepIter failed!"); } // perform downsweep on idata cudaMemset(dev_dataPadded + nPadded - 1, 0, sizeof(int)); checkCUDAError("cudaMemset dev_dataPadded + nPadded - 1 failed!"); for (int i = depth - 1; i >= 0; i--) { kernDownSweepIter<<<fullBlocksPerGrid, threadsPerBlock>>>(nPadded, i, dev_dataPadded); checkCUDAError("kernDownSweepIter failed!"); } cudaDeviceSynchronize(); checkCUDAError("cudaDeviceSynchronize failed!"); timer().endGpuTimer(); // copy scan back to host cudaMemcpy(odata, dev_dataPadded, n * sizeof(int), cudaMemcpyDeviceToHost); checkCUDAError("cudaMemcpy dev_dataPadded failed!"); cudaFree(dev_dataPadded); checkCUDAError("cudaFree dev_dataPadded failed!"); } __global__ void kernScatter(int nPadded, const int* idata, int* odata, const int* dataPadded) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= nPadded) { return; } if (idata[index]) { odata[dataPadded[index]] = idata[index]; } } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. */ int compact(int n, int *odata, const int *idata) { // TODO if (n < 1) { return -1; } // allocate a buffer padded to a power of 2. int depth = ilog2ceil(n); int nPadded = 1 << depth; // calling kernels means we cannot directly index into idata. 
Need to have a device copy int* dev_dataPadded; cudaMalloc((void**)&dev_dataPadded, n * sizeof(int)); checkCUDAError("cudaMalloc dev_dataPadded failed!"); cudaMemset(dev_dataPadded, 0, n * sizeof(int)); checkCUDAError("cudaMemset dev_dataPadded failed!"); cudaMemcpy(dev_dataPadded, idata, n * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("cudaMemcpy dev_dataPadded failed!"); // mapping of true and false for idata int* dev_bools; cudaMalloc((void**)&dev_bools, n * sizeof(int)); checkCUDAError("cudaMalloc dev_bools failed!"); // array that will be scanned into int* dev_index; cudaMalloc((void**)&dev_index, nPadded * sizeof(int)); checkCUDAError("cudaMalloc dev_index failed!"); cudaMemset(dev_index, 0, nPadded * sizeof(int)); checkCUDAError("cudaMemset dev_index failed!"); int* dev_odata; cudaMalloc((void**)&dev_odata, n * sizeof(int)); checkCUDAError("cudaMalloc dev_odata failed!"); // set blocks and threads dim3 threadsPerBlock(blockSize); dim3 fullBlocksPerGrid(std::ceil((double) nPadded / blockSize)); timer().startGpuTimer(); // SCAN StreamCompaction::Common::kernMapToBoolean << <fullBlocksPerGrid, threadsPerBlock >> > (n, dev_bools, dev_index, dev_dataPadded); checkCUDAError("kernMapToBoolean failed!"); // perform upsweep on idata for (int i = 0; i < depth; i++) { kernUpSweepIter << <fullBlocksPerGrid, threadsPerBlock >> > (nPadded, i, dev_index); checkCUDAError("kernUpSweepIter failed!"); } // perform downsweep on idata cudaMemset(dev_index + nPadded - 1, 0, sizeof(int)); checkCUDAError("cudaMemset dev_dataPadded + nPadded - 1 failed!"); for (int i = depth - 1; i >= 0; i--) { kernDownSweepIter<<<fullBlocksPerGrid, threadsPerBlock>>>(nPadded, i, dev_index); checkCUDAError("kernDownSweepIter failed!"); } // SCATTER StreamCompaction::Common::kernScatter << <fullBlocksPerGrid, threadsPerBlock >> > (n, dev_odata, dev_dataPadded, dev_bools, dev_index); checkCUDAError("kernScatter failed!"); cudaDeviceSynchronize(); checkCUDAError("cudaDeviceSynchronize failed!"); timer().endGpuTimer(); // return compact to odata cudaMemcpy(odata, dev_odata, n * sizeof(int), cudaMemcpyDeviceToHost); checkCUDAError("cudaMemcpy dev_bools failed!"); // return final index and bool to host to calculate number of elements int idx, val; cudaMemcpy((void*)&idx, dev_index + n - 1, sizeof(int), cudaMemcpyDeviceToHost); checkCUDAError("cudaMemcpy idx failed!"); cudaMemcpy((void*)&val, dev_bools + n - 1, sizeof(int), cudaMemcpyDeviceToHost); checkCUDAError("cudaMemcpy val failed!"); // free cudaFree(dev_dataPadded); checkCUDAError("cudaFree dev_dataPadded failed!"); cudaFree(dev_bools); checkCUDAError("cudaFree dev_bools failed!"); cudaFree(dev_index); checkCUDAError("cudaFree dev_index failed!"); cudaFree(dev_odata); checkCUDAError("cudaFree dev_odata failed!"); return idx + val; } } }
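// Hedged usage sketch, not part of the original repo: it exercises the scan/compact
// entry points defined above, assuming "efficient.h" declares them with these
// signatures (as this .cu file implies) and that common.h supplies blockSize and
// ilog2ceil.
#include <cstdio>
#include "efficient.h"

int main() {
    const int n = 8;
    int idata[n] = {1, 0, 3, 0, 0, 7, 2, 0};
    int scanned[n] = {0};
    int compacted[n] = {0};

    // Exclusive prefix sum (Blelloch up-sweep/down-sweep): expect {0, 1, 1, 4, 4, 4, 11, 13}.
    StreamCompaction::Efficient::scan(n, scanned, idata);

    // Stream compaction drops zeroes: expect {1, 3, 7, 2} and a count of 4.
    int count = StreamCompaction::Efficient::compact(n, compacted, idata);
    printf("kept %d of %d elements\n", count, n);
    return 0;
}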
4d19d595868c50f752d85d49a0e841dd46f06fc9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once #include "pcuditas/gpu/gpu_array.cu" __device__ double atomicAddDouble(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template<class ParticleT, class EnvironmentT> __global__ void update_forces_atomic_kernel( ParticleT *particles, int n_particles, EnvironmentT *env_ptr) { EnvironmentT env = (*env_ptr); for (int k = blockIdx.x*blockDim.x + threadIdx.x; k < n_particles*n_particles; k += blockDim.x*gridDim.x) { int i = k % n_particles; int j = k/n_particles; double cutoff_radius = 3.5; auto dr = env.distance_vector( particles[j].position, particles[i].position ); if (dr.magnitude() < cutoff_radius) { auto force = ParticleT::force_law(dr); for( int d=0; d<force.dimensions; ++d ){ atomicAddDouble( &(particles[i].force[d]), force[d] ); } } } } template<class ParticleT, class EnvironmentT> void update_forces_atomic( gpu_array<ParticleT> &particles, gpu_object<EnvironmentT> &environment) { using vector_t = typename ParticleT::vector_type; // First, reset forces particles.for_each([] __device__ (ParticleT& self, int i){ self.force = vector_t::zero(); }); // Launch the kernel! As you can see we are not copying memory from CPU to GPU // as you would normally do with hipMemcpy(), as we don't need to! The // vectors live in GPU already so we just need to know where they start (GPU // pointer) and pass it to the kernel. unsigned int block_size = 1024; unsigned int threads_per_block = 32; hipLaunchKernelGGL(( update_forces_atomic_kernel), dim3(block_size),dim3(threads_per_block), 0, 0, particles.gpu_pointer(), particles.size, environment.gpu_pointer() ); }
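// Stand-alone sketch (mine, not from pcuditas) of the flattened pair indexing the
// kernel above uses: a grid-stride loop over k in [0, n*n) decoded as
// (i, j) = (k % n, k / n) visits every (target, source) pair exactly once,
// whatever the grid size. The i != j guard is added here only for illustration;
// the kernel above also iterates over the self-pair.
__global__ void visitAllPairs(int n, unsigned long long* pairCount) {
    for (int k = blockIdx.x * blockDim.x + threadIdx.x;
         k < n * n;
         k += blockDim.x * gridDim.x) {
        int i = k % n;   // particle receiving the contribution
        int j = k / n;   // particle providing it
        if (i != j) {
            atomicAdd(pairCount, 1ULL);   // expect n * (n - 1) increments in total
        }
    }
}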
4d19d595868c50f752d85d49a0e841dd46f06fc9.cu
#pragma once #include "pcuditas/gpu/gpu_array.cu" __device__ double atomicAddDouble(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template<class ParticleT, class EnvironmentT> __global__ void update_forces_atomic_kernel( ParticleT *particles, int n_particles, EnvironmentT *env_ptr) { EnvironmentT env = (*env_ptr); for (int k = blockIdx.x*blockDim.x + threadIdx.x; k < n_particles*n_particles; k += blockDim.x*gridDim.x) { int i = k % n_particles; int j = k/n_particles; double cutoff_radius = 3.5; auto dr = env.distance_vector( particles[j].position, particles[i].position ); if (dr.magnitude() < cutoff_radius) { auto force = ParticleT::force_law(dr); for( int d=0; d<force.dimensions; ++d ){ atomicAddDouble( &(particles[i].force[d]), force[d] ); } } } } template<class ParticleT, class EnvironmentT> void update_forces_atomic( gpu_array<ParticleT> &particles, gpu_object<EnvironmentT> &environment) { using vector_t = typename ParticleT::vector_type; // First, reset forces particles.for_each([] __device__ (ParticleT& self, int i){ self.force = vector_t::zero(); }); // Launch the kernel! As you can see we are not copying memory from CPU to GPU // as you would normally do with cudaMemcpy(), as we don't need to! The // vectors live in GPU already so we just need to know where they start (GPU // pointer) and pass it to the kernel. unsigned int block_size = 1024; unsigned int threads_per_block = 32; update_forces_atomic_kernel<<<block_size,threads_per_block>>>( particles.gpu_pointer(), particles.size, environment.gpu_pointer() ); }
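// Minimal, self-contained check (mine) of the CAS-based double atomicAdd pattern
// shown above, useful on devices without a native double-precision atomicAdd.
// Each of the 64 * 256 threads adds 1.0 into a single accumulator.
#include <cstdio>
#include <cuda_runtime.h>

__device__ double atomicAddDoubleCAS(double* address, double val) {
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

__global__ void accumulate(double* sum) {
    atomicAddDoubleCAS(sum, 1.0);
}

int main() {
    double* d_sum;
    cudaMalloc(&d_sum, sizeof(double));
    cudaMemset(d_sum, 0, sizeof(double));   // all-zero bytes == +0.0
    accumulate<<<64, 256>>>(d_sum);
    double h_sum = 0.0;
    cudaMemcpy(&h_sum, d_sum, sizeof(double), cudaMemcpyDeviceToHost);
    printf("sum = %.1f (expected %d)\n", h_sum, 64 * 256);
    cudaFree(d_sum);
    return 0;
}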
ab10f5fbff72df6e48f17018988b0f523271a486.hip
// !!! This is a file automatically generated by hipify!!!
#include <hipblas.h>
#include <hiprand/hiprand.h>
#include <hip/hip_runtime.h>
#include "gpu_blas.h"

void gpu_sgemm(int m, int n, int k, float alpha, float* h_A, int lda,
               float* h_B, int ldb, float beta, float* h_C, int ldc) {
    // Create a handle for hipBLAS
    hipblasHandle_t handle;
    hipblasCreate(&handle);

    // Device buffers for A, B and C
    float *d_A, *d_B, *d_C;
    hipMalloc(&d_A, m * n * sizeof(float));
    hipMalloc(&d_B, n * k * sizeof(float));
    hipMalloc(&d_C, m * k * sizeof(float));

    // Copy the host values of A and B to the GPU
    hipMemcpy(d_A, h_A, m * n * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, n * k * sizeof(float), hipMemcpyHostToDevice); // matches the n * k allocation of d_B

    // Do the actual multiplication
    hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k,
                 &alpha, d_A, lda, d_B, ldb, &beta, d_C, ldc);

    // Copy the result back to host memory
    hipMemcpy(h_C, d_C, m * k * sizeof(float), hipMemcpyDeviceToHost);

    // Destroy the handle and free device memory
    hipblasDestroy(handle);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
}

void gpu_dgemm(int m, int n, int k, double alpha, double* h_A, int lda,
               double* h_B, int ldb, double beta, double* h_C, int ldc) {
    // Create a handle for hipBLAS
    hipblasHandle_t handle;
    hipblasCreate(&handle);

    double *d_A, *d_B, *d_C;
    hipMalloc(&d_A, m * n * sizeof(double));
    hipMalloc(&d_B, n * k * sizeof(double));
    hipMalloc(&d_C, m * k * sizeof(double));

    hipMemcpy(d_A, h_A, m * n * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, n * k * sizeof(double), hipMemcpyHostToDevice); // matches the n * k allocation of d_B

    // Do the actual multiplication
    hipblasDgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k,
                 &alpha, d_A, lda, d_B, ldb, &beta, d_C, ldc);

    hipMemcpy(h_C, d_C, m * k * sizeof(double), hipMemcpyDeviceToHost);

    // Destroy the handle and free device memory
    hipblasDestroy(handle);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
}
ab10f5fbff72df6e48f17018988b0f523271a486.cu
#include <cublas_v2.h>
#include <curand.h>
#include <cuda_runtime.h>
#include "gpu_blas.h"

void gpu_sgemm(int m, int n, int k, float alpha, float* h_A, int lda,
               float* h_B, int ldb, float beta, float* h_C, int ldc) {
    // Create a handle for cuBLAS
    cublasHandle_t handle;
    cublasCreate(&handle);

    // Device buffers for A, B and C
    float *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, m * n * sizeof(float));
    cudaMalloc(&d_B, n * k * sizeof(float));
    cudaMalloc(&d_C, m * k * sizeof(float));

    // Copy the host values of A and B to the GPU
    cudaMemcpy(d_A, h_A, m * n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, n * k * sizeof(float), cudaMemcpyHostToDevice); // matches the n * k allocation of d_B

    // Do the actual multiplication
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
                &alpha, d_A, lda, d_B, ldb, &beta, d_C, ldc);

    // Copy the result back to host memory
    cudaMemcpy(h_C, d_C, m * k * sizeof(float), cudaMemcpyDeviceToHost);

    // Destroy the handle and free device memory
    cublasDestroy(handle);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}

void gpu_dgemm(int m, int n, int k, double alpha, double* h_A, int lda,
               double* h_B, int ldb, double beta, double* h_C, int ldc) {
    // Create a handle for cuBLAS
    cublasHandle_t handle;
    cublasCreate(&handle);

    double *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, m * n * sizeof(double));
    cudaMalloc(&d_B, n * k * sizeof(double));
    cudaMalloc(&d_C, m * k * sizeof(double));

    cudaMemcpy(d_A, h_A, m * n * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, n * k * sizeof(double), cudaMemcpyHostToDevice); // matches the n * k allocation of d_B

    // Do the actual multiplication
    cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
                &alpha, d_A, lda, d_B, ldb, &beta, d_C, ldc);

    cudaMemcpy(h_C, d_C, m * k * sizeof(double), cudaMemcpyDeviceToHost);

    // Destroy the handle and free device memory
    cublasDestroy(handle);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
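// Hedged usage sketch for the wrappers above; it assumes gpu_blas.h declares
// gpu_dgemm exactly as defined in this file. Square matrices sidestep the m/n/k
// bookkeeping, and cuBLAS is column-major, so A = [1 3; 2 4] is stored {1, 2, 3, 4}.
#include <cstdio>
#include "gpu_blas.h"

int main() {
    const int n = 2;
    double A[n * n] = {1, 2, 3, 4};   // column-major [1 3; 2 4]
    double B[n * n] = {1, 0, 0, 1};   // identity
    double C[n * n] = {0, 0, 0, 0};

    gpu_dgemm(n, n, n, 1.0, A, n, B, n, 0.0, C, n);

    // With B = I, C reproduces A: {1, 2, 3, 4}.
    printf("C = [%g %g; %g %g]\n", C[0], C[2], C[1], C[3]);
    return 0;
}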
89aec38554887382229f4a850b2ebe8da60f8936.hip
// !!! This is a file automatically generated by hipify!!! /** * Johannes and David * TU Munich * Sep 2015 */ #include <stdio.h> #include "mex.h" #include <iostream> #include <vector> #include <cmath> #include <ctime> #include <hip/hip_runtime.h> #include "rocblas.h" #include "time.h" #include <cstdlib> #include <iostream> //using std::string; //using std::cout; //using std::endl; #define IDX2C(i,j,ld) (((j)*(ld))+(i)) //modify index for 0-based indexing #define max(A, B) ((A) > (B) ? (A) : (B)) #define min(A, B) ((A) < (B) ? (A) : (B)) // error check macros #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, hipGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) // for CUBLAS V2 API #define cublasCheckErrors(fn) \ do { \ hipblasStatus_t __err = fn; \ if (__err != HIPBLAS_STATUS_SUCCESS) { \ fprintf(stderr, "Fatal cublas error: %d (at %s:%d)\n", \ (int)(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) // data filler void fillvector(double *data, int N, double value){ for(int i=0; i<N; i++){ data[i] = value; } } // measuring time class Timer { public: Timer() : tStart(0), running(false), sec(0.f) { } void start() { tStart = clock(); running = true; } void end() { if (!running) { sec = 0; return; } hipDeviceSynchronize(); clock_t tEnd = clock(); sec = (float)(tEnd - tStart) / CLOCKS_PER_SEC; running = false; } float get() { if (running) end(); return sec; } private: clock_t tStart; bool running; float sec; }; // Calculates Pt1 using threads __global__ void calc_Pt1(double* d_Pt1, double* d_sp, double outlier_tmp, int N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < N) { d_Pt1[idx] = 1.0f - (outlier_tmp/(d_sp[idx] + outlier_tmp)); } } // Use threads to calculate E, later we sum up __global__ void calc_E(double* d_E, double* d_sp, double outlier_tmp, int N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < N) { d_E[idx] = -log(d_sp[idx] + outlier_tmp); } } // Calculates Px using threads __global__ void calc_X_tmp(double* d_Xtemp, double* d_X, double* d_denom, int starting_index, int slice_size, int D, int N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int d = threadIdx.y + blockDim.y * blockIdx.y; if (idx < slice_size && d < D) { // Create d_Xtemp (slice_size * D) d_Xtemp[IDX2C(idx,d,slice_size)] = d_denom[idx] * d_X[IDX2C(idx + starting_index,d,N)]; } } // Calculates slice_size denominator using threads __global__ void calc_denominator(double* d_denom, double* d_sp, double outlier_tmp, int slice_size) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < slice_size) { d_denom[idx] = 1.0f / (d_sp[idx] + outlier_tmp); } } // Kernel calculating the nominators of each entry of P (for 6980 x 6980 it takes 160ms) __global__ void calc_nominator(double* d_X, double* d_Y, double* d_PSlice, double ksig, int N, int M, int D, int slice_size, int slice_nr){ int idx = threadIdx.x + blockDim.x * blockIdx.x; int j = threadIdx.y + blockDim.y * blockIdx.y; int i = idx + (slice_size*slice_nr); if (idx < slice_size && i<N && j < M){ double diff = 0; double razn = 0; for (int d=0; d < D; d++) { //iterate through D dimensions diff=d_X[i+d*N] - d_Y[j+d*M]; diff=diff*diff; //take the square of the euclidean norm of one scalar -> square the scalar razn+=diff; //proposed name: eucl_dist_sqr; add up the differences for each dimension to get the 
scalar length of the high-dimensional vector } // Set it using a row-major -> column major translator (for CUBLAS and MATLAB) d_PSlice[IDX2C(i%slice_size,j,slice_size)]=exp(razn/ksig); //nominator } } void cpd_comp( double* x, double* y, double* sigma2, double* outlier, double* P1, double* Pt1, double* Px, double* E, int N, int M, int D ) { int n, m, d; double ksig, diff, razn, outlier_tmp, sp; double *P, *temp_x; double *PSlice; int slice_size = N/10; double *ones; double *filler; P = (double*) calloc(M, sizeof(double)); temp_x = (double*) calloc(D, sizeof(double)); PSlice = (double*) calloc(slice_size*M, sizeof(double)); ones = (double*) calloc(M, sizeof(double)); filler = (double*) calloc(N,sizeof(double)); ksig = -2.0 * *sigma2; outlier_tmp=(*outlier*M*pow (-ksig*3.14159265358979,0.5*D))/((1-*outlier)*N); fillvector(ones, M, 1); fillvector(filler,N,0); /* printf ("ksig = %lf\n", *sigma2);*/ /* outlier_tmp=*outlier*N/(1- *outlier)/M*(-ksig*3.14159265358979); */ // CUBLAS Stuff hipError_t cudaStat; hipblasStatus_t stat; hipblasHandle_t handle; double* d_X; double* d_Y; double* d_PSlice; double* d_PSlice_mat; double* d_P1; double* d_P1_tmp; double* d_Pt1; double* d_Px; double* d_E; double* d_ones; double* slice_tmp; slice_tmp = (double *)malloc(M*D*sizeof(double)); double* d_sp; double* d_denom; //stores a denominator vector double* d_X_tmp; //stores a sliced X * denom version of X //TODO: Finish Matrix Vector Multiplication // Allocate memory on the device hipMalloc (&d_X, N*D*sizeof(double)); hipMalloc (&d_Y, M*D*sizeof(double)); hipMalloc (&d_PSlice, M*slice_size*sizeof(double)); hipMalloc (&d_P1, N*sizeof(double)); hipMalloc (&d_P1_tmp, N*sizeof(double)); hipMalloc (&d_Pt1, M*sizeof(double)); hipMalloc (&d_Px, M*D*sizeof(double)); hipMalloc (&d_E, N*sizeof(double)); hipMalloc (&d_ones, M * sizeof(double)); hipMalloc (&d_sp, N*sizeof(double)); hipMalloc (&d_denom, slice_size*sizeof(double)); hipMalloc (&d_X_tmp, slice_size*D*sizeof(double)); cudaCheckErrors("cuda malloc fail"); // Create CUBLAS Context stat = hipblasCreate(&handle); // TODO: Load data in the beginning instead of every time! hipMemcpy(d_X, x, N*D* sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_Y, y, M*D* sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_ones, ones, M*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_sp, filler, N*sizeof(double), hipMemcpyHostToDevice); // Cpy Px to GPU once! hipMemcpy(d_Px, Px, N*D*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_P1, P1, N*sizeof(double),hipMemcpyHostToDevice); int numSlices = N / slice_size; dim3 block; dim3 grid; block = dim3(4, 32, 1); grid = dim3((slice_size + block.x - 1) / block.x, (M + block.y - 1) / block.y); Timer timer; timer.start(); for (int s=0; s<numSlices; s++){ //mexPrintf("\n Iteration %i \n",s); block = dim3(4, 32, 1); grid = dim3((slice_size + block.x - 1) / block.x, (M + block.y - 1) / block.y); hipLaunchKernelGGL(( calc_nominator) , dim3(grid), dim3(block), 0, 0, d_X, d_Y, d_PSlice, ksig, N, M, D, slice_size, s); double alpha = 1.0f; double beta = 0.0f; int rowsA = slice_size; int columnsA = M; // Calculates sp without outlier stat = hipblasDgemv(handle, HIPBLAS_OP_N, rowsA, columnsA, &alpha, d_PSlice, slice_size, d_ones, 1, &beta, d_sp+(s*slice_size), 1); cublasCheckErrors(stat); // Get the denominator as 1/sp + outlier in d_denom block = dim3(256, 1, 1); grid = dim3((slice_size + block.x - 1) / block.x,1); // denominator correctly calculates! 
(tested for 6890) hipLaunchKernelGGL(( calc_denominator) , dim3(grid), dim3(block), 0, 0, d_denom, d_sp+(s*slice_size), outlier_tmp, slice_size); // Calculate P1 using PSlice_t * denom stat = hipblasDgemv(handle, HIPBLAS_OP_T, rowsA, columnsA, &alpha, d_PSlice, slice_size, d_denom, 1, &beta, d_P1_tmp, 1); cublasCheckErrors(stat); // Add P1_tmp to P1 stat = hipblasDaxpy(handle, M, &alpha, d_P1_tmp, 1, d_P1, 1); cublasCheckErrors(stat); // Calculate Px block = dim3(64, 4, 1); grid = dim3((slice_size + block.x - 1) / block.x, (D + block.y - 1) / block.y); // First calculate X_temp_sliced (takes 50ms) hipLaunchKernelGGL(( calc_X_tmp) , dim3(grid), dim3(block), 0, 0, d_X_tmp, d_X, d_denom, (s*slice_size), slice_size, D, N); // Do PSlice_t * X_tmp =+ Px beta = 1.0f; stat = hipblasDgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, M, D, slice_size, &alpha, d_PSlice, slice_size, d_X_tmp, slice_size, &beta, d_Px, M); cublasCheckErrors(stat); } // Calculates the complete P1 block = dim3(256, 1, 1); grid = dim3((N + block.x - 1) / block.x,1); hipLaunchKernelGGL(( calc_Pt1) , dim3(grid), dim3(block), 0, 0, d_Pt1, d_sp, outlier_tmp, N); // Calculate E hipLaunchKernelGGL(( calc_E) , dim3(grid), dim3(block), 0, 0, d_E, d_sp, outlier_tmp, N); // Sum up E stat = hipblasDasum(handle, N, d_E, 1, &*E); *E +=D*N*log(*sigma2)/2; hipMemcpy(Pt1, d_Pt1, N*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(Px, d_Px, M*D*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(P1, d_P1, N* sizeof(double), hipMemcpyDeviceToHost); // Free Device Space, so MATLAB doesnt crash hipFree(d_X); hipFree(d_Y); hipFree(d_PSlice); hipFree(d_P1); hipFree(d_P1_tmp); hipFree(d_Pt1); hipFree(d_Px); hipFree(d_E); hipFree(d_ones); hipFree(d_sp); hipFree(d_denom); hipFree(d_X_tmp); timer.end(); float t = timer.get(); // elapsed time in seconds mexPrintf("\n GPU Time: %f ms \n",t * 1000); //for (n=0; n < N; n++) { //mexPrintf ("\n"); // use for printing P[m] // sp=0; // for (m=0; m < M; m++) { //iterate through all points (M: width of y) // razn=0; // for (d=0; d < D; d++) { //iterate through D dimensions // diff=x[n+d*N] - y[m+d*M];// *(x+n+d*N)-*(y+m+d*M); // diff=diff*diff; //take the square of the euclidean norm of one scalar -> square the scalar // razn+=diff; //proposed name: eucl_dist_sqr; add up the differences for each dimension to get the scalar length of the high-dimensional vector // } // // P[m]=exp(razn/ksig); //nominator // // sp+=P[m]; //sum in the denominator // } //sp+=outlier_tmp; //for this particular x point, we calculate the complete denominator // Test out if everything works with Pt1 from GPU //Pt1[n] = 1-outlier_tmp/ sp; //see documentation: (1 - ca) // if((float)slice_tmp[n] != (float)(1.0f/sp)){ // // mexPrintf("Assertion failed! %d - denom[n] on GPU/CPU: %f - %f \n",n, slice_tmp[n], 1.0f/sp); // // mexPrintf("sp on CPU: %f \n",sp); // } // for (d=0; d < D; d++) { // temp_x[d]=x[n+d*N]/ sp; // } // // for (m=0; m < M; m++) { // // P1[m]+=P[m]/ sp; //P1 is the P * sum_vector from the equation (see documentation) // // for (d=0; d < D; d++) { // Px[m+d*M]+= temp_x[d]*P[m]; // } // // } //*E += -log(sp); //entropy: measure of overall change //} // Test for P1 // for (int i = 0; i < M; i++) { // for (int d =0; d < D; d++) { // if((float)slice_tmp[IDX2C(i,d,M)] != (float)Px[IDX2C(i,d,M)]){ // // mexPrintf("Assertion failed! 
%d - Px[n] on GPU/CPU: %f - %f \n",i,slice_tmp[IDX2C(i,d,M)], Px[IDX2C(i,d,M)]); // // mexPrintf("sp on CPU: %f \n",sp); // } // } // } // //*E +=D*N*log(*sigma2)/2; free((void*)P); free((void*)PSlice); free((void*)temp_x); free((void*)ones); free((void*)filler); free((void*)slice_tmp); return; } /* Input arguments */ #define IN_x prhs[0] #define IN_y prhs[1] #define IN_sigma2 prhs[2] #define IN_outlier prhs[3] /* Output arguments */ #define OUT_P1 plhs[0] #define OUT_Pt1 plhs[1] #define OUT_Px plhs[2] #define OUT_E plhs[3] /* Gateway routine */ void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] ) { hipError_t cudaStat; hipblasStatus_t stat; hipblasHandle_t handle; double *x, *y, *sigma2, *outlier, *P1, *Pt1, *Px, *E; int N, M, D; /* Get the sizes of each input argument */ N = mxGetM(IN_x); M = mxGetM(IN_y); D = mxGetN(IN_x); /* Create the new arrays and set the output pointers to them */ OUT_P1 = mxCreateDoubleMatrix(M, 1, mxREAL); OUT_Pt1 = mxCreateDoubleMatrix(N, 1, mxREAL); OUT_Px = mxCreateDoubleMatrix(M, D, mxREAL); OUT_E = mxCreateDoubleMatrix(1, 1, mxREAL); /* Assign pointers to the input arguments */ x = mxGetPr(IN_x); y = mxGetPr(IN_y); sigma2 = mxGetPr(IN_sigma2); outlier = mxGetPr(IN_outlier); /* Assign pointers to the output arguments */ P1 = mxGetPr(OUT_P1); Pt1 = mxGetPr(OUT_Pt1); Px = mxGetPr(OUT_Px); E = mxGetPr(OUT_E); /* Do the actual computations in a subroutine */ cpd_comp(x, y, sigma2, outlier, P1, Pt1, Px, E, N, M, D); return; }
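// Tiny host-side illustration (mine, not part of the MEX file) of the IDX2C
// convention the kernels above depend on: MATLAB hands the gateway column-major
// arrays, so element (i, j) of a matrix with leading dimension ld lives at
// j * ld + i.
#include <cstdio>

#define IDX2C(i,j,ld) (((j)*(ld))+(i))

int main() {
    const int rows = 2, cols = 3;
    double M[rows * cols] = {1, 2, 3, 4, 5, 6};   // column-major [1 3 5; 2 4 6]
    for (int i = 0; i < rows; ++i) {
        for (int j = 0; j < cols; ++j)
            printf("%4.1f ", M[IDX2C(i, j, rows)]);
        printf("\n");
    }
    return 0;
}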
89aec38554887382229f4a850b2ebe8da60f8936.cu
/** * Johannes and David * TU Munich * Sep 2015 */ #include <stdio.h> #include "mex.h" #include <iostream> #include <vector> #include <cmath> #include <ctime> #include <cuda_runtime.h> #include "cublas_v2.h" #include "time.h" #include <cstdlib> #include <iostream> //using std::string; //using std::cout; //using std::endl; #define IDX2C(i,j,ld) (((j)*(ld))+(i)) //modify index for 0-based indexing #define max(A, B) ((A) > (B) ? (A) : (B)) #define min(A, B) ((A) < (B) ? (A) : (B)) // error check macros #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \ msg, cudaGetErrorString(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) // for CUBLAS V2 API #define cublasCheckErrors(fn) \ do { \ cublasStatus_t __err = fn; \ if (__err != CUBLAS_STATUS_SUCCESS) { \ fprintf(stderr, "Fatal cublas error: %d (at %s:%d)\n", \ (int)(__err), \ __FILE__, __LINE__); \ fprintf(stderr, "*** FAILED - ABORTING\n"); \ exit(1); \ } \ } while (0) // data filler void fillvector(double *data, int N, double value){ for(int i=0; i<N; i++){ data[i] = value; } } // measuring time class Timer { public: Timer() : tStart(0), running(false), sec(0.f) { } void start() { tStart = clock(); running = true; } void end() { if (!running) { sec = 0; return; } cudaDeviceSynchronize(); clock_t tEnd = clock(); sec = (float)(tEnd - tStart) / CLOCKS_PER_SEC; running = false; } float get() { if (running) end(); return sec; } private: clock_t tStart; bool running; float sec; }; // Calculates Pt1 using threads __global__ void calc_Pt1(double* d_Pt1, double* d_sp, double outlier_tmp, int N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < N) { d_Pt1[idx] = 1.0f - (outlier_tmp/(d_sp[idx] + outlier_tmp)); } } // Use threads to calculate E, later we sum up __global__ void calc_E(double* d_E, double* d_sp, double outlier_tmp, int N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < N) { d_E[idx] = -log(d_sp[idx] + outlier_tmp); } } // Calculates Px using threads __global__ void calc_X_tmp(double* d_Xtemp, double* d_X, double* d_denom, int starting_index, int slice_size, int D, int N) { int idx = threadIdx.x + blockDim.x * blockIdx.x; int d = threadIdx.y + blockDim.y * blockIdx.y; if (idx < slice_size && d < D) { // Create d_Xtemp (slice_size * D) d_Xtemp[IDX2C(idx,d,slice_size)] = d_denom[idx] * d_X[IDX2C(idx + starting_index,d,N)]; } } // Calculates slice_size denominator using threads __global__ void calc_denominator(double* d_denom, double* d_sp, double outlier_tmp, int slice_size) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < slice_size) { d_denom[idx] = 1.0f / (d_sp[idx] + outlier_tmp); } } // Kernel calculating the nominators of each entry of P (for 6980 x 6980 it takes 160ms) __global__ void calc_nominator(double* d_X, double* d_Y, double* d_PSlice, double ksig, int N, int M, int D, int slice_size, int slice_nr){ int idx = threadIdx.x + blockDim.x * blockIdx.x; int j = threadIdx.y + blockDim.y * blockIdx.y; int i = idx + (slice_size*slice_nr); if (idx < slice_size && i<N && j < M){ double diff = 0; double razn = 0; for (int d=0; d < D; d++) { //iterate through D dimensions diff=d_X[i+d*N] - d_Y[j+d*M]; diff=diff*diff; //take the square of the euclidean norm of one scalar -> square the scalar razn+=diff; //proposed name: eucl_dist_sqr; add up the differences for each dimension to get the scalar length of the high-dimensional vector } // Set it 
using a row-major -> column major translator (for CUBLAS and MATLAB) d_PSlice[IDX2C(i%slice_size,j,slice_size)]=exp(razn/ksig); //nominator } } void cpd_comp( double* x, double* y, double* sigma2, double* outlier, double* P1, double* Pt1, double* Px, double* E, int N, int M, int D ) { int n, m, d; double ksig, diff, razn, outlier_tmp, sp; double *P, *temp_x; double *PSlice; int slice_size = N/10; double *ones; double *filler; P = (double*) calloc(M, sizeof(double)); temp_x = (double*) calloc(D, sizeof(double)); PSlice = (double*) calloc(slice_size*M, sizeof(double)); ones = (double*) calloc(M, sizeof(double)); filler = (double*) calloc(N,sizeof(double)); ksig = -2.0 * *sigma2; outlier_tmp=(*outlier*M*pow (-ksig*3.14159265358979,0.5*D))/((1-*outlier)*N); fillvector(ones, M, 1); fillvector(filler,N,0); /* printf ("ksig = %lf\n", *sigma2);*/ /* outlier_tmp=*outlier*N/(1- *outlier)/M*(-ksig*3.14159265358979); */ // CUBLAS Stuff cudaError_t cudaStat; cublasStatus_t stat; cublasHandle_t handle; double* d_X; double* d_Y; double* d_PSlice; double* d_PSlice_mat; double* d_P1; double* d_P1_tmp; double* d_Pt1; double* d_Px; double* d_E; double* d_ones; double* slice_tmp; slice_tmp = (double *)malloc(M*D*sizeof(double)); double* d_sp; double* d_denom; //stores a denominator vector double* d_X_tmp; //stores a sliced X * denom version of X //TODO: Finish Matrix Vector Multiplication // Allocate memory on the device cudaMalloc (&d_X, N*D*sizeof(double)); cudaMalloc (&d_Y, M*D*sizeof(double)); cudaMalloc (&d_PSlice, M*slice_size*sizeof(double)); cudaMalloc (&d_P1, N*sizeof(double)); cudaMalloc (&d_P1_tmp, N*sizeof(double)); cudaMalloc (&d_Pt1, M*sizeof(double)); cudaMalloc (&d_Px, M*D*sizeof(double)); cudaMalloc (&d_E, N*sizeof(double)); cudaMalloc (&d_ones, M * sizeof(double)); cudaMalloc (&d_sp, N*sizeof(double)); cudaMalloc (&d_denom, slice_size*sizeof(double)); cudaMalloc (&d_X_tmp, slice_size*D*sizeof(double)); cudaCheckErrors("cuda malloc fail"); // Create CUBLAS Context stat = cublasCreate(&handle); // TODO: Load data in the beginning instead of every time! cudaMemcpy(d_X, x, N*D* sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_Y, y, M*D* sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_ones, ones, M*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_sp, filler, N*sizeof(double), cudaMemcpyHostToDevice); // Cpy Px to GPU once! cudaMemcpy(d_Px, Px, N*D*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_P1, P1, N*sizeof(double),cudaMemcpyHostToDevice); int numSlices = N / slice_size; dim3 block; dim3 grid; block = dim3(4, 32, 1); grid = dim3((slice_size + block.x - 1) / block.x, (M + block.y - 1) / block.y); Timer timer; timer.start(); for (int s=0; s<numSlices; s++){ //mexPrintf("\n Iteration %i \n",s); block = dim3(4, 32, 1); grid = dim3((slice_size + block.x - 1) / block.x, (M + block.y - 1) / block.y); calc_nominator <<<grid, block>>> (d_X, d_Y, d_PSlice, ksig, N, M, D, slice_size, s); double alpha = 1.0f; double beta = 0.0f; int rowsA = slice_size; int columnsA = M; // Calculates sp without outlier stat = cublasDgemv(handle, CUBLAS_OP_N, rowsA, columnsA, &alpha, d_PSlice, slice_size, d_ones, 1, &beta, d_sp+(s*slice_size), 1); cublasCheckErrors(stat); // Get the denominator as 1/sp + outlier in d_denom block = dim3(256, 1, 1); grid = dim3((slice_size + block.x - 1) / block.x,1); // denominator correctly calculates! 
(tested for 6890) calc_denominator <<<grid, block>>> (d_denom, d_sp+(s*slice_size), outlier_tmp, slice_size); // Calculate P1 using PSlice_t * denom stat = cublasDgemv(handle, CUBLAS_OP_T, rowsA, columnsA, &alpha, d_PSlice, slice_size, d_denom, 1, &beta, d_P1_tmp, 1); cublasCheckErrors(stat); // Add P1_tmp to P1 stat = cublasDaxpy(handle, M, &alpha, d_P1_tmp, 1, d_P1, 1); cublasCheckErrors(stat); // Calculate Px block = dim3(64, 4, 1); grid = dim3((slice_size + block.x - 1) / block.x, (D + block.y - 1) / block.y); // First calculate X_temp_sliced (takes 50ms) calc_X_tmp <<<grid, block>>> (d_X_tmp, d_X, d_denom, (s*slice_size), slice_size, D, N); // Do PSlice_t * X_tmp =+ Px beta = 1.0f; stat = cublasDgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, M, D, slice_size, &alpha, d_PSlice, slice_size, d_X_tmp, slice_size, &beta, d_Px, M); cublasCheckErrors(stat); } // Calculates the complete P1 block = dim3(256, 1, 1); grid = dim3((N + block.x - 1) / block.x,1); calc_Pt1 <<<grid, block>>> (d_Pt1, d_sp, outlier_tmp, N); // Calculate E calc_E <<<grid, block>>> (d_E, d_sp, outlier_tmp, N); // Sum up E stat = cublasDasum(handle, N, d_E, 1, &*E); *E +=D*N*log(*sigma2)/2; cudaMemcpy(Pt1, d_Pt1, N*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(Px, d_Px, M*D*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(P1, d_P1, N* sizeof(double), cudaMemcpyDeviceToHost); // Free Device Space, so MATLAB doesnt crash cudaFree(d_X); cudaFree(d_Y); cudaFree(d_PSlice); cudaFree(d_P1); cudaFree(d_P1_tmp); cudaFree(d_Pt1); cudaFree(d_Px); cudaFree(d_E); cudaFree(d_ones); cudaFree(d_sp); cudaFree(d_denom); cudaFree(d_X_tmp); timer.end(); float t = timer.get(); // elapsed time in seconds mexPrintf("\n GPU Time: %f ms \n",t * 1000); //for (n=0; n < N; n++) { //mexPrintf ("\n"); // use for printing P[m] // sp=0; // for (m=0; m < M; m++) { //iterate through all points (M: width of y) // razn=0; // for (d=0; d < D; d++) { //iterate through D dimensions // diff=x[n+d*N] - y[m+d*M];// *(x+n+d*N)-*(y+m+d*M); // diff=diff*diff; //take the square of the euclidean norm of one scalar -> square the scalar // razn+=diff; //proposed name: eucl_dist_sqr; add up the differences for each dimension to get the scalar length of the high-dimensional vector // } // // P[m]=exp(razn/ksig); //nominator // // sp+=P[m]; //sum in the denominator // } //sp+=outlier_tmp; //for this particular x point, we calculate the complete denominator // Test out if everything works with Pt1 from GPU //Pt1[n] = 1-outlier_tmp/ sp; //see documentation: (1 - ca) // if((float)slice_tmp[n] != (float)(1.0f/sp)){ // // mexPrintf("Assertion failed! %d - denom[n] on GPU/CPU: %f - %f \n",n, slice_tmp[n], 1.0f/sp); // // mexPrintf("sp on CPU: %f \n",sp); // } // for (d=0; d < D; d++) { // temp_x[d]=x[n+d*N]/ sp; // } // // for (m=0; m < M; m++) { // // P1[m]+=P[m]/ sp; //P1 is the P * sum_vector from the equation (see documentation) // // for (d=0; d < D; d++) { // Px[m+d*M]+= temp_x[d]*P[m]; // } // // } //*E += -log(sp); //entropy: measure of overall change //} // Test for P1 // for (int i = 0; i < M; i++) { // for (int d =0; d < D; d++) { // if((float)slice_tmp[IDX2C(i,d,M)] != (float)Px[IDX2C(i,d,M)]){ // // mexPrintf("Assertion failed! 
%d - Px[n] on GPU/CPU: %f - %f \n",i,slice_tmp[IDX2C(i,d,M)], Px[IDX2C(i,d,M)]); // // mexPrintf("sp on CPU: %f \n",sp); // } // } // } // //*E +=D*N*log(*sigma2)/2; free((void*)P); free((void*)PSlice); free((void*)temp_x); free((void*)ones); free((void*)filler); free((void*)slice_tmp); return; } /* Input arguments */ #define IN_x prhs[0] #define IN_y prhs[1] #define IN_sigma2 prhs[2] #define IN_outlier prhs[3] /* Output arguments */ #define OUT_P1 plhs[0] #define OUT_Pt1 plhs[1] #define OUT_Px plhs[2] #define OUT_E plhs[3] /* Gateway routine */ void mexFunction( int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] ) { cudaError_t cudaStat; cublasStatus_t stat; cublasHandle_t handle; double *x, *y, *sigma2, *outlier, *P1, *Pt1, *Px, *E; int N, M, D; /* Get the sizes of each input argument */ N = mxGetM(IN_x); M = mxGetM(IN_y); D = mxGetN(IN_x); /* Create the new arrays and set the output pointers to them */ OUT_P1 = mxCreateDoubleMatrix(M, 1, mxREAL); OUT_Pt1 = mxCreateDoubleMatrix(N, 1, mxREAL); OUT_Px = mxCreateDoubleMatrix(M, D, mxREAL); OUT_E = mxCreateDoubleMatrix(1, 1, mxREAL); /* Assign pointers to the input arguments */ x = mxGetPr(IN_x); y = mxGetPr(IN_y); sigma2 = mxGetPr(IN_sigma2); outlier = mxGetPr(IN_outlier); /* Assign pointers to the output arguments */ P1 = mxGetPr(OUT_P1); Pt1 = mxGetPr(OUT_Pt1); Px = mxGetPr(OUT_Px); E = mxGetPr(OUT_E); /* Do the actual computations in a subroutine */ cpd_comp(x, y, sigma2, outlier, P1, Pt1, Px, E, N, M, D); return; }
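// Small illustration (mine, not from the MEX file) of the trick used above to
// build d_sp: multiplying a column-major matrix by a vector of ones with
// cublasDgemv yields the per-row sums of that slice of P.
#include <cstdio>
#include <cuda_runtime.h>
#include <cublas_v2.h>

int main() {
    const int rows = 3, cols = 2;
    double h_A[rows * cols] = {1, 2, 3, 4, 5, 6};   // column-major; rows are (1,4), (2,5), (3,6)
    double h_ones[cols] = {1, 1};
    double h_rowsum[rows] = {0, 0, 0};

    double *d_A, *d_ones, *d_rowsum;
    cudaMalloc(&d_A, sizeof(h_A));
    cudaMalloc(&d_ones, sizeof(h_ones));
    cudaMalloc(&d_rowsum, sizeof(h_rowsum));
    cudaMemcpy(d_A, h_A, sizeof(h_A), cudaMemcpyHostToDevice);
    cudaMemcpy(d_ones, h_ones, sizeof(h_ones), cudaMemcpyHostToDevice);

    cublasHandle_t handle;
    cublasCreate(&handle);
    const double alpha = 1.0, beta = 0.0;
    // y = A * ones  ->  row sums {5, 7, 9}.
    cublasDgemv(handle, CUBLAS_OP_N, rows, cols, &alpha, d_A, rows,
                d_ones, 1, &beta, d_rowsum, 1);
    cudaMemcpy(h_rowsum, d_rowsum, sizeof(h_rowsum), cudaMemcpyDeviceToHost);
    printf("row sums: %g %g %g\n", h_rowsum[0], h_rowsum[1], h_rowsum[2]);

    cublasDestroy(handle);
    cudaFree(d_A); cudaFree(d_ones); cudaFree(d_rowsum);
    return 0;
}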
1eecf07af33745a77a744b9b4fafe32faa233c4e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <sys/time.h> #include <string.h> #include <math.h> // includes, kernels #include "gauss_eliminate_kernel.cu" #define MIN_NUMBER 2 #define MAX_NUMBER 50 extern "C" int compute_gold(float*, const float*, unsigned int); Matrix allocate_matrix_on_gpu(const Matrix M); Matrix allocate_matrix(int num_rows, int num_columns, int init); void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost); void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice); void gauss_eliminate_on_device(const Matrix M, Matrix P); int perform_simple_check(const Matrix M); void print_matrix(const Matrix M); void write_matrix_to_file(const Matrix M); float get_random_number(int, int); void checkCUDAError(const char *msg); int checkResults(float *reference, float *gpu_result, int num_elements, float threshold); float tp; int main(int argc, char** argv) { // Matrices for the program Matrix A; // The NxN input matrix Matrix U; // The upper triangular matrix struct timeval t1,t2; // Initialize the random number generator with a seed value srand(time(NULL)); // Check command line arguments if(argc > 1){ printf("Error. This program accepts no arguments. \n"); exit(0); } // Allocate and initialize the matrices A = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 1); U = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 0); // Perform Gaussian elimination on the CPU Matrix reference = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 0); gettimeofday(&t1, NULL); int status = compute_gold(reference.elements, A.elements, A.num_rows); gettimeofday(&t2, NULL); printf("Serial execution time = %fs. \n", (float)(t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec)/(float)1000000)); float ts=(float)(t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec)/(float)1000000); if(status == 0){ printf("Failed to convert given matrix to upper triangular. Try again. Exiting. \n"); exit(0); } status = perform_simple_check(reference); // Check that the principal diagonal elements are 1 if(status == 0){ printf("The upper triangular matrix is incorrect. Exiting. \n"); exit(0); } printf("Gaussian elimination on the CPU was successful. \n"); // Perform the vector-matrix multiplication on the GPU. Return the result in U gauss_eliminate_on_device(A, U); float speedup=ts/tp; printf("Speedup: %f\n",speedup); // check if the device result is equivalent to the expected solution int num_elements = MATRIX_SIZE*MATRIX_SIZE; int res = checkResults(reference.elements, U.elements, num_elements, 0.001f); printf("Test %s\n", (1 == res) ? 
"PASSED" : "FAILED"); // Free host matrices free(A.elements); A.elements = NULL; free(U.elements); U.elements = NULL; free(reference.elements); reference.elements = NULL; return 0; } void gauss_eliminate_on_device(const Matrix A, Matrix U){ int num_elements=A.num_rows,i,j; for (i = 0; i < num_elements; i ++) for(j = 0; j < num_elements; j++) U.elements[num_elements * i + j] = A.elements[num_elements*i + j]; Matrix U_gpu = allocate_matrix_on_gpu(U); copy_matrix_to_device(U_gpu, U); struct timeval t1,t2; int grid_size = (num_elements / BLOCK_SIZE) ; //grid parameters for GPU dim3 thread_block(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 grid(1, grid_size); gettimeofday(&t1, NULL); i=0; while ( i < num_elements - 1) { hipLaunchKernelGGL(( gauss_eliminate_kernel_divide) , dim3(grid), dim3(thread_block) , 0, 0, U_gpu.elements, i, num_elements); hipDeviceSynchronize(); hipLaunchKernelGGL(( gauss_eliminate_kernel_eliminate) , dim3(grid), dim3(thread_block) , 0, 0, U_gpu.elements, i,num_elements); hipDeviceSynchronize(); ++i; } gettimeofday(&t2, NULL); printf("Execution time = %fs. \n", (float)(t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec)/(float)1000000)); tp=(float)(t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec)/(float)1000000); copy_matrix_from_device(U, U_gpu); U.elements[(num_elements * num_elements) - 1] = 1; hipFree(U_gpu.elements); } // Allocate a device matrix of same size as M. Matrix allocate_matrix_on_gpu(const Matrix M){ Matrix Mdevice = M; int size = M.num_rows * M.num_columns * sizeof(float); hipMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. Matrix allocate_matrix(int num_rows, int num_columns, int init){ Matrix M; M.num_columns = M.pitch = num_columns; M.num_rows = num_rows; int size = M.num_rows * M.num_columns; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < size; i++){ if(init == 0) M.elements[i] = 0; else M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER); } return M; } // Copy a host matrix to a device matrix. void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.num_rows * Mhost.num_columns * sizeof(float); Mdevice.num_rows = Mhost.num_rows; Mdevice.num_columns = Mhost.num_columns; Mdevice.pitch = Mhost.pitch; hipMemcpy(Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice){ int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float); hipMemcpy(Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost); } // Prints the matrix out to screen void print_matrix(const Matrix M){ for(unsigned int i = 0; i < M.num_rows; i++){ for(unsigned int j = 0; j < M.num_columns; j++) printf("%f ", M.elements[i*M.num_rows + j]); printf("\n"); } printf("\n"); } // Returns a random floating-point number between the specified min and max values float get_random_number(int min, int max){ return (float)floor((double)(min + (max - min + 1)*((float)rand()/(float)RAND_MAX))); } // Performs a simple check on the upper triangular matrix. 
Checks to see if the principal diagonal elements are 1 int perform_simple_check(const Matrix M){ for(unsigned int i = 0; i < M.num_rows; i++) if((fabs(M.elements[M.num_rows*i + i] - 1.0)) > 0.001) return 0; return 1; } // Writes the matrix to a file void write_matrix_to_file(const Matrix M){ FILE *fp; fp = fopen("matrix.txt", "wt"); for(unsigned int i = 0; i < M.num_rows; i++){ for(unsigned int j = 0; j < M.num_columns; j++) fprintf(fp, "%f", M.elements[i*M.num_rows + j]); } fclose(fp); } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { printf("CUDA ERROR: %s (%s).\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } int checkResults(float *reference, float *gpu_result, int num_elements, float threshold) { int checkMark = 1; float epsilon = 0.0; for(int i = 0; i < num_elements; i++) if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){ checkMark = 0; break; } for(int i = 0; i < num_elements; i++) if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){ epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]); } printf("Max epsilon = %f. \n", epsilon); return checkMark; }
1eecf07af33745a77a744b9b4fafe32faa233c4e.cu
#include <stdlib.h> #include <stdio.h> #include <sys/time.h> #include <string.h> #include <math.h> // includes, kernels #include "gauss_eliminate_kernel.cu" #define MIN_NUMBER 2 #define MAX_NUMBER 50 extern "C" int compute_gold(float*, const float*, unsigned int); Matrix allocate_matrix_on_gpu(const Matrix M); Matrix allocate_matrix(int num_rows, int num_columns, int init); void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost); void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice); void gauss_eliminate_on_device(const Matrix M, Matrix P); int perform_simple_check(const Matrix M); void print_matrix(const Matrix M); void write_matrix_to_file(const Matrix M); float get_random_number(int, int); void checkCUDAError(const char *msg); int checkResults(float *reference, float *gpu_result, int num_elements, float threshold); float tp; int main(int argc, char** argv) { // Matrices for the program Matrix A; // The NxN input matrix Matrix U; // The upper triangular matrix struct timeval t1,t2; // Initialize the random number generator with a seed value srand(time(NULL)); // Check command line arguments if(argc > 1){ printf("Error. This program accepts no arguments. \n"); exit(0); } // Allocate and initialize the matrices A = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 1); U = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 0); // Perform Gaussian elimination on the CPU Matrix reference = allocate_matrix(MATRIX_SIZE, MATRIX_SIZE, 0); gettimeofday(&t1, NULL); int status = compute_gold(reference.elements, A.elements, A.num_rows); gettimeofday(&t2, NULL); printf("Serial execution time = %fs. \n", (float)(t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec)/(float)1000000)); float ts=(float)(t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec)/(float)1000000); if(status == 0){ printf("Failed to convert given matrix to upper triangular. Try again. Exiting. \n"); exit(0); } status = perform_simple_check(reference); // Check that the principal diagonal elements are 1 if(status == 0){ printf("The upper triangular matrix is incorrect. Exiting. \n"); exit(0); } printf("Gaussian elimination on the CPU was successful. \n"); // Perform the vector-matrix multiplication on the GPU. Return the result in U gauss_eliminate_on_device(A, U); float speedup=ts/tp; printf("Speedup: %f\n",speedup); // check if the device result is equivalent to the expected solution int num_elements = MATRIX_SIZE*MATRIX_SIZE; int res = checkResults(reference.elements, U.elements, num_elements, 0.001f); printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED"); // Free host matrices free(A.elements); A.elements = NULL; free(U.elements); U.elements = NULL; free(reference.elements); reference.elements = NULL; return 0; } void gauss_eliminate_on_device(const Matrix A, Matrix U){ int num_elements=A.num_rows,i,j; for (i = 0; i < num_elements; i ++) for(j = 0; j < num_elements; j++) U.elements[num_elements * i + j] = A.elements[num_elements*i + j]; Matrix U_gpu = allocate_matrix_on_gpu(U); copy_matrix_to_device(U_gpu, U); struct timeval t1,t2; int grid_size = (num_elements / BLOCK_SIZE) ; //grid parameters for GPU dim3 thread_block(BLOCK_SIZE, BLOCK_SIZE, 1); dim3 grid(1, grid_size); gettimeofday(&t1, NULL); i=0; while ( i < num_elements - 1) { gauss_eliminate_kernel_divide <<< grid, thread_block >>> (U_gpu.elements, i, num_elements); cudaThreadSynchronize(); gauss_eliminate_kernel_eliminate <<< grid, thread_block >>> (U_gpu.elements, i,num_elements); cudaThreadSynchronize(); ++i; } gettimeofday(&t2, NULL); printf("Execution time = %fs. 
\n", (float)(t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec)/(float)1000000)); tp=(float)(t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec)/(float)1000000); copy_matrix_from_device(U, U_gpu); U.elements[(num_elements * num_elements) - 1] = 1; cudaFree(U_gpu.elements); } // Allocate a device matrix of same size as M. Matrix allocate_matrix_on_gpu(const Matrix M){ Matrix Mdevice = M; int size = M.num_rows * M.num_columns * sizeof(float); cudaMalloc((void**)&Mdevice.elements, size); return Mdevice; } // Allocate a matrix of dimensions height*width // If init == 0, initialize to all zeroes. // If init == 1, perform random initialization. Matrix allocate_matrix(int num_rows, int num_columns, int init){ Matrix M; M.num_columns = M.pitch = num_columns; M.num_rows = num_rows; int size = M.num_rows * M.num_columns; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < size; i++){ if(init == 0) M.elements[i] = 0; else M.elements[i] = get_random_number(MIN_NUMBER, MAX_NUMBER); } return M; } // Copy a host matrix to a device matrix. void copy_matrix_to_device(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.num_rows * Mhost.num_columns * sizeof(float); Mdevice.num_rows = Mhost.num_rows; Mdevice.num_columns = Mhost.num_columns; Mdevice.pitch = Mhost.pitch; cudaMemcpy(Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void copy_matrix_from_device(Matrix Mhost, const Matrix Mdevice){ int size = Mdevice.num_rows * Mdevice.num_columns * sizeof(float); cudaMemcpy(Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost); } // Prints the matrix out to screen void print_matrix(const Matrix M){ for(unsigned int i = 0; i < M.num_rows; i++){ for(unsigned int j = 0; j < M.num_columns; j++) printf("%f ", M.elements[i*M.num_rows + j]); printf("\n"); } printf("\n"); } // Returns a random floating-point number between the specified min and max values float get_random_number(int min, int max){ return (float)floor((double)(min + (max - min + 1)*((float)rand()/(float)RAND_MAX))); } // Performs a simple check on the upper triangular matrix. Checks to see if the principal diagonal elements are 1 int perform_simple_check(const Matrix M){ for(unsigned int i = 0; i < M.num_rows; i++) if((fabs(M.elements[M.num_rows*i + i] - 1.0)) > 0.001) return 0; return 1; } // Writes the matrix to a file void write_matrix_to_file(const Matrix M){ FILE *fp; fp = fopen("matrix.txt", "wt"); for(unsigned int i = 0; i < M.num_rows; i++){ for(unsigned int j = 0; j < M.num_columns; j++) fprintf(fp, "%f", M.elements[i*M.num_rows + j]); } fclose(fp); } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { printf("CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } int checkResults(float *reference, float *gpu_result, int num_elements, float threshold) { int checkMark = 1; float epsilon = 0.0; for(int i = 0; i < num_elements; i++) if(fabsf((reference[i] - gpu_result[i])/reference[i]) > threshold){ checkMark = 0; break; } for(int i = 0; i < num_elements; i++) if(fabsf((reference[i] - gpu_result[i])/reference[i]) > epsilon){ epsilon = fabsf((reference[i] - gpu_result[i])/reference[i]); } printf("Max epsilon = %f. \n", epsilon); return checkMark; }
0bb893003451c0b3ea8ea6e8225b10b66ab692b7.hip
// !!! This is a file automatically generated by hipify!!! #include "grad_h.h" #if MATLAB_MEX_FILE #include "mex.h" #define PRINTF mexPrintf #else #define PRINTF printf #endif #include <cmath> #include <vector> #include <thread> #include <assert.h> #include <hip/hip_runtime.h> #include <rocblas.h> using namespace std; const int nT = 1024; #define checkCuda(result) checkCudaInner(result, __LINE__) // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline hipError_t checkCudaInner(hipError_t result, int lineno) { if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s on line %d\n", hipGetErrorString(result), lineno); assert(result == hipSuccess); } return result; } __host__ __device__ inline double entropy(double acc, double Ez) { assert(Ez > 0); return (acc - Ez * log(Ez)) / Ez; } // B is a matrix with K columns and N rows template<typename T> __global__ void computeEz(const T * __restrict__ Br, const T * __restrict__ Bi, const T * __restrict__ Zr, const T * __restrict__ Zi, const T * __restrict__ Ar, const T * __restrict__ Ai, T * __restrict__ g_Ez, T * __restrict__ g_acc, size_t N, size_t K) { extern __shared__ T sdata[]; T *Ez = sdata, *acc = &sdata[blockDim.x]; T x(0.0), y(0.0); const int k = blockIdx.x; // Accumulate per thread partial sum over columns of B* for (int n = threadIdx.x; n < N; n += blockDim.x) { double Zn_r = Ar[k] * Br[n * K + k] - Ai[k] * Bi[n * K + k] + Zr[n]; double Zn_i = Ar[k] * Bi[n * K + k] + Ai[k] * Br[n * K + k] + Zi[n]; double Zn_mag = Zn_r * Zn_r + Zn_i * Zn_i; assert(Zn_mag >= 0); x += Zn_mag; y += Zn_mag * log(Zn_mag); } // load thread partial sum into shared memory Ez[threadIdx.x] = x; acc[threadIdx.x] = y; __syncthreads(); for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) { if (threadIdx.x < offset) { Ez[threadIdx.x] += Ez[threadIdx.x + offset]; acc[threadIdx.x] += acc[threadIdx.x + offset]; } __syncthreads(); } // thread 0 writes the final result if (threadIdx.x == 0) { g_Ez[blockIdx.x] += Ez[0]; g_acc[blockIdx.x] += acc[0]; } } // B is a matrix with K columns and N rows template<typename T> __global__ void kernelSum(const T * __restrict__ Br, const T * __restrict__ Bi, T * __restrict__ Zr, T * __restrict__ Zi, const size_t K, T * __restrict__ Z_mag, const T * __restrict__ P) { extern __shared__ T sdata[]; T *s1 = sdata, *s2 = &sdata[blockDim.x]; T x(0.0), y(0.0); const T * Br_row = &Br[blockIdx.x * K]; const T * Bi_row = &Bi[blockIdx.x * K]; // Accumulate per thread partial sum double sin, cos; for (int i = threadIdx.x; i < K; i += blockDim.x) { sincos(P[i % K], &sin, &cos); x += Br_row[i] * cos + Bi_row[i] * sin; y += Bi_row[i] * cos - Br_row[i] * sin; } // load thread partial sum into shared memory s1[threadIdx.x] = x; s2[threadIdx.x] = y; __syncthreads(); for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) { if (threadIdx.x < offset) { s1[threadIdx.x] += s1[threadIdx.x + offset]; s2[threadIdx.x] += s2[threadIdx.x + offset]; } __syncthreads(); } // thread 0 writes the final result if (threadIdx.x == 0) { Zr[blockIdx.x] = s1[0]; Zi[blockIdx.x] = s2[0]; Z_mag[blockIdx.x] = s1[0] * s1[0] + s2[0] * s2[0]; assert(Z_mag[blockIdx.x] >= 0); } } template <class T> __global__ void computeEntropy(T *Z_mag, T *d_acc, unsigned int n) { extern __shared__ T sdata[]; // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; assert(Z_mag[i] >= 0); sdata[tid] = (i < n) ? 
Z_mag[i] * log(Z_mag[i]) : 0; __syncthreads(); // do reduction in shared mem for (unsigned int s=1; s < blockDim.x; s *= 2) { // modulo arithmetic is slow! if ((tid % (2*s)) == 0) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) { d_acc[blockIdx.x] = sdata[0]; } } __global__ void computeAlpha(const double *P, double sin_delt, double cos_delt, double *Ar, double *Ai, size_t K) { unsigned int k = blockIdx.x*blockDim.x + threadIdx.x; double sin_phi, cos_phi; if (k < K) { sincos(P[k], &sin_phi, &cos_phi); Ar[k] = (-sin_delt) * sin_phi + cos_delt * cos_phi - cos_phi; Ai[k] = sin_delt * (-cos_phi) - cos_delt * sin_phi + sin_phi; } } __global__ void computeGrad(double *grad, const double *acc, const double *Ez, double H0, double delta, size_t K) { unsigned int k = blockIdx.x*blockDim.x + threadIdx.x; if (k < K) { grad[k] = (-entropy(acc[k], Ez[k]) - H0) / delta; } } // Returns the entropy of the complex image `Z` void H_not(const double *d_P, double *d_Br, double *d_Bi, double *Zr, double *Zi, double *Ez, double *acc, size_t K, size_t B_len) { const size_t N = B_len / K; assert(B_len % K == 0); // length(B) should always be a multiple of K double *d_Z_mag = NULL; checkCuda(hipMalloc((void **)&d_Z_mag, N * sizeof(double))); hipLaunchKernelGGL(( kernelSum<double>), dim3(N), dim3(nT), 2 * nT * sizeof(double), 0, d_Br, d_Bi, Zr, Zi, K, d_Z_mag, d_P); int bs = (N + nT - 1) / nT; // cheap ceil() double *d_accum = NULL; double *accum = NULL; checkCuda(hipHostMalloc((void **)&accum, bs * sizeof(double))); checkCuda(hipMalloc((void **)&d_accum, bs * sizeof(double))); hipblasHandle_t handle; hipblasCreate(&handle); hipblasSetPointerMode(handle, HIPBLAS_POINTER_MODE_HOST); // hipblasDasum sums the absolute values, but Z_mag is always positive so this // is correct double sum = 0; hipblasDasum(handle, N, d_Z_mag, 1, &sum); *Ez += sum; hipLaunchKernelGGL(( computeEntropy<double>), dim3(bs), dim3(nT), nT * sizeof(double), 0, d_Z_mag, d_accum, N); checkCuda(hipMemcpy(accum, d_accum, bs * sizeof(double), hipMemcpyDeviceToHost)); for (size_t b(0); b < bs; ++b) { *acc += accum[b]; } checkCuda(hipFree(d_Z_mag)); checkCuda(hipFree(d_accum)); hipblasDestroy(handle); } // TODO: Nice doc comments void gradH(double *phi_offsets, const double *Br, const double *Bi, double *grad, size_t K, size_t B_len) { // const auto maxMem = 2147483648 / 2; // 1 GB - size of half of GPU physical memory const size_t N = B_len / K; // Solve for N: // (4 * K + 2 * N + 2 * K * N_prime) * sizeof(double); // size_t N_prime = min((maxMem / sizeof(double) - 4 * K - 2 * N) / (2 * K), N); size_t N_prime = 64000; // TODO: Determine a more precise value size_t B_len_prime = N_prime * K; double *d_Br, *d_Bi, *d_P, *d_Zr, *d_Zi, *d_Ez, *d_acc, *d_Ar, *d_Ai, *d_grad; double Ez = 0, acc = 0; // TODO: Use pinned memory checkCuda(hipMalloc((void **)&d_P, K * sizeof(double))); checkCuda(hipMemcpy(d_P, phi_offsets, K * sizeof(double), hipMemcpyHostToDevice)); checkCuda(hipMalloc((void **)&d_Zr, N * sizeof(double))); checkCuda(hipMalloc((void **)&d_Zi, N * sizeof(double))); checkCuda(hipMalloc((void **)&d_Br, B_len_prime * sizeof(double))); checkCuda(hipMalloc((void **)&d_Bi, B_len_prime * sizeof(double))); checkCuda(hipMalloc((void **)&d_Ar, K * sizeof(double))); checkCuda(hipMalloc((void **)&d_Ai, K * sizeof(double))); checkCuda(hipMalloc((void **)&d_Ez, K * sizeof(double))); checkCuda(hipMalloc((void **)&d_acc, K * sizeof(double))); checkCuda(hipMalloc((void **)&d_grad, K * sizeof(double))); 
checkCuda(hipMemset(d_Ez, 0, K * sizeof(double))); checkCuda(hipMemset(d_acc, 0, K * sizeof(double))); double sin_delt, cos_delt; sincos(delta, &sin_delt, &cos_delt); hipLaunchKernelGGL(( computeAlpha), dim3((K + nT - 1) / nT), dim3(nT), 0, 0, d_P, sin_delt, cos_delt, d_Ar, d_Ai, K); PRINTF("In gradH, about to compute Z\n"); PRINTF("Computed Z\n"); size_t num_iter = ceil((float)B_len / B_len_prime); for (size_t i(0); i < num_iter; ++i) { size_t len = min(B_len_prime, B_len - i * B_len_prime); checkCuda(hipMemcpy(d_Br, &Br[i * B_len_prime], len * sizeof(double), hipMemcpyHostToDevice)); checkCuda(hipMemcpy(d_Bi, &Bi[i * B_len_prime], len * sizeof(double), hipMemcpyHostToDevice)); H_not(d_P, d_Br, d_Bi, &d_Zr[i * N_prime], &d_Zi[i * N_prime], &Ez, &acc, K, len); hipLaunchKernelGGL(( computeEz<double>), dim3(K), dim3(nT), 2 * nT * sizeof(double), 0, d_Br, d_Bi, &d_Zr[i * N_prime], &d_Zi[i * N_prime], d_Ar, d_Ai, d_Ez, d_acc, len / K, K); } double H0 = -entropy(acc, Ez); PRINTF("Computed H_not=%f\n", H0); hipLaunchKernelGGL(( computeGrad), dim3((K + nT - 1) / nT), dim3(nT), 0, 0, d_grad, d_acc, d_Ez, H0, delta, K); checkCuda(hipMemcpy(grad, d_grad, K * sizeof(double), hipMemcpyDeviceToHost)); checkCuda(hipFree(d_Br)); checkCuda(hipFree(d_Bi)); checkCuda(hipFree(d_Zr)); checkCuda(hipFree(d_Zi)); checkCuda(hipFree(d_P)); checkCuda(hipFree(d_Ar)); checkCuda(hipFree(d_Ai)); checkCuda(hipFree(d_grad)); }
0bb893003451c0b3ea8ea6e8225b10b66ab692b7.cu
#include "grad_h.h" #if MATLAB_MEX_FILE #include "mex.h" #define PRINTF mexPrintf #else #define PRINTF printf #endif #include <cmath> #include <vector> #include <thread> #include <assert.h> #include <cuda_runtime.h> #include <cublas_v2.h> using namespace std; const int nT = 1024; #define checkCuda(result) checkCudaInner(result, __LINE__) // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. inline cudaError_t checkCudaInner(cudaError_t result, int lineno) { if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s on line %d\n", cudaGetErrorString(result), lineno); assert(result == cudaSuccess); } return result; } __host__ __device__ inline double entropy(double acc, double Ez) { assert(Ez > 0); return (acc - Ez * log(Ez)) / Ez; } // B is a matrix with K columns and N rows template<typename T> __global__ void computeEz(const T * __restrict__ Br, const T * __restrict__ Bi, const T * __restrict__ Zr, const T * __restrict__ Zi, const T * __restrict__ Ar, const T * __restrict__ Ai, T * __restrict__ g_Ez, T * __restrict__ g_acc, size_t N, size_t K) { extern __shared__ T sdata[]; T *Ez = sdata, *acc = &sdata[blockDim.x]; T x(0.0), y(0.0); const int k = blockIdx.x; // Accumulate per thread partial sum over columns of B* for (int n = threadIdx.x; n < N; n += blockDim.x) { double Zn_r = Ar[k] * Br[n * K + k] - Ai[k] * Bi[n * K + k] + Zr[n]; double Zn_i = Ar[k] * Bi[n * K + k] + Ai[k] * Br[n * K + k] + Zi[n]; double Zn_mag = Zn_r * Zn_r + Zn_i * Zn_i; assert(Zn_mag >= 0); x += Zn_mag; y += Zn_mag * log(Zn_mag); } // load thread partial sum into shared memory Ez[threadIdx.x] = x; acc[threadIdx.x] = y; __syncthreads(); for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) { if (threadIdx.x < offset) { Ez[threadIdx.x] += Ez[threadIdx.x + offset]; acc[threadIdx.x] += acc[threadIdx.x + offset]; } __syncthreads(); } // thread 0 writes the final result if (threadIdx.x == 0) { g_Ez[blockIdx.x] += Ez[0]; g_acc[blockIdx.x] += acc[0]; } } // B is a matrix with K columns and N rows template<typename T> __global__ void kernelSum(const T * __restrict__ Br, const T * __restrict__ Bi, T * __restrict__ Zr, T * __restrict__ Zi, const size_t K, T * __restrict__ Z_mag, const T * __restrict__ P) { extern __shared__ T sdata[]; T *s1 = sdata, *s2 = &sdata[blockDim.x]; T x(0.0), y(0.0); const T * Br_row = &Br[blockIdx.x * K]; const T * Bi_row = &Bi[blockIdx.x * K]; // Accumulate per thread partial sum double sin, cos; for (int i = threadIdx.x; i < K; i += blockDim.x) { sincos(P[i % K], &sin, &cos); x += Br_row[i] * cos + Bi_row[i] * sin; y += Bi_row[i] * cos - Br_row[i] * sin; } // load thread partial sum into shared memory s1[threadIdx.x] = x; s2[threadIdx.x] = y; __syncthreads(); for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) { if (threadIdx.x < offset) { s1[threadIdx.x] += s1[threadIdx.x + offset]; s2[threadIdx.x] += s2[threadIdx.x + offset]; } __syncthreads(); } // thread 0 writes the final result if (threadIdx.x == 0) { Zr[blockIdx.x] = s1[0]; Zi[blockIdx.x] = s2[0]; Z_mag[blockIdx.x] = s1[0] * s1[0] + s2[0] * s2[0]; assert(Z_mag[blockIdx.x] >= 0); } } template <class T> __global__ void computeEntropy(T *Z_mag, T *d_acc, unsigned int n) { extern __shared__ T sdata[]; // load shared mem unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; assert(Z_mag[i] >= 0); sdata[tid] = (i < n) ? 
Z_mag[i] * log(Z_mag[i]) : 0; __syncthreads(); // do reduction in shared mem for (unsigned int s=1; s < blockDim.x; s *= 2) { // modulo arithmetic is slow! if ((tid % (2*s)) == 0) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) { d_acc[blockIdx.x] = sdata[0]; } } __global__ void computeAlpha(const double *P, double sin_delt, double cos_delt, double *Ar, double *Ai, size_t K) { unsigned int k = blockIdx.x*blockDim.x + threadIdx.x; double sin_phi, cos_phi; if (k < K) { sincos(P[k], &sin_phi, &cos_phi); Ar[k] = (-sin_delt) * sin_phi + cos_delt * cos_phi - cos_phi; Ai[k] = sin_delt * (-cos_phi) - cos_delt * sin_phi + sin_phi; } } __global__ void computeGrad(double *grad, const double *acc, const double *Ez, double H0, double delta, size_t K) { unsigned int k = blockIdx.x*blockDim.x + threadIdx.x; if (k < K) { grad[k] = (-entropy(acc[k], Ez[k]) - H0) / delta; } } // Returns the entropy of the complex image `Z` void H_not(const double *d_P, double *d_Br, double *d_Bi, double *Zr, double *Zi, double *Ez, double *acc, size_t K, size_t B_len) { const size_t N = B_len / K; assert(B_len % K == 0); // length(B) should always be a multiple of K double *d_Z_mag = NULL; checkCuda(cudaMalloc((void **)&d_Z_mag, N * sizeof(double))); kernelSum<double><<<N, nT, 2 * nT * sizeof(double)>>>(d_Br, d_Bi, Zr, Zi, K, d_Z_mag, d_P); int bs = (N + nT - 1) / nT; // cheap ceil() double *d_accum = NULL; double *accum = NULL; checkCuda(cudaMallocHost((void **)&accum, bs * sizeof(double))); checkCuda(cudaMalloc((void **)&d_accum, bs * sizeof(double))); cublasHandle_t handle; cublasCreate(&handle); cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_HOST); // cublasDasum sums the absolute values, but Z_mag is always positive so this // is correct double sum = 0; cublasDasum(handle, N, d_Z_mag, 1, &sum); *Ez += sum; computeEntropy<double><<<bs, nT, nT * sizeof(double)>>>(d_Z_mag, d_accum, N); checkCuda(cudaMemcpy(accum, d_accum, bs * sizeof(double), cudaMemcpyDeviceToHost)); for (size_t b(0); b < bs; ++b) { *acc += accum[b]; } checkCuda(cudaFree(d_Z_mag)); checkCuda(cudaFree(d_accum)); cublasDestroy(handle); } // TODO: Nice doc comments void gradH(double *phi_offsets, const double *Br, const double *Bi, double *grad, size_t K, size_t B_len) { // const auto maxMem = 2147483648 / 2; // 1 GB - size of half of GPU physical memory const size_t N = B_len / K; // Solve for N: // (4 * K + 2 * N + 2 * K * N_prime) * sizeof(double); // size_t N_prime = min((maxMem / sizeof(double) - 4 * K - 2 * N) / (2 * K), N); size_t N_prime = 64000; // TODO: Determine a more precise value size_t B_len_prime = N_prime * K; double *d_Br, *d_Bi, *d_P, *d_Zr, *d_Zi, *d_Ez, *d_acc, *d_Ar, *d_Ai, *d_grad; double Ez = 0, acc = 0; // TODO: Use pinned memory checkCuda(cudaMalloc((void **)&d_P, K * sizeof(double))); checkCuda(cudaMemcpy(d_P, phi_offsets, K * sizeof(double), cudaMemcpyHostToDevice)); checkCuda(cudaMalloc((void **)&d_Zr, N * sizeof(double))); checkCuda(cudaMalloc((void **)&d_Zi, N * sizeof(double))); checkCuda(cudaMalloc((void **)&d_Br, B_len_prime * sizeof(double))); checkCuda(cudaMalloc((void **)&d_Bi, B_len_prime * sizeof(double))); checkCuda(cudaMalloc((void **)&d_Ar, K * sizeof(double))); checkCuda(cudaMalloc((void **)&d_Ai, K * sizeof(double))); checkCuda(cudaMalloc((void **)&d_Ez, K * sizeof(double))); checkCuda(cudaMalloc((void **)&d_acc, K * sizeof(double))); checkCuda(cudaMalloc((void **)&d_grad, K * sizeof(double))); checkCuda(cudaMemset(d_Ez, 0, K * sizeof(double))); 
checkCuda(cudaMemset(d_acc, 0, K * sizeof(double))); double sin_delt, cos_delt; sincos(delta, &sin_delt, &cos_delt); computeAlpha<<<(K + nT - 1) / nT, nT>>>(d_P, sin_delt, cos_delt, d_Ar, d_Ai, K); PRINTF("In gradH, about to compute Z\n"); PRINTF("Computed Z\n"); size_t num_iter = ceil((float)B_len / B_len_prime); for (size_t i(0); i < num_iter; ++i) { size_t len = min(B_len_prime, B_len - i * B_len_prime); checkCuda(cudaMemcpy(d_Br, &Br[i * B_len_prime], len * sizeof(double), cudaMemcpyHostToDevice)); checkCuda(cudaMemcpy(d_Bi, &Bi[i * B_len_prime], len * sizeof(double), cudaMemcpyHostToDevice)); H_not(d_P, d_Br, d_Bi, &d_Zr[i * N_prime], &d_Zi[i * N_prime], &Ez, &acc, K, len); computeEz<double><<<K, nT, 2 * nT * sizeof(double)>>>(d_Br, d_Bi, &d_Zr[i * N_prime], &d_Zi[i * N_prime], d_Ar, d_Ai, d_Ez, d_acc, len / K, K); } double H0 = -entropy(acc, Ez); PRINTF("Computed H_not=%f\n", H0); computeGrad<<<(K + nT - 1) / nT, nT>>>(d_grad, d_acc, d_Ez, H0, delta, K); checkCuda(cudaMemcpy(grad, d_grad, K * sizeof(double), cudaMemcpyDeviceToHost)); checkCuda(cudaFree(d_Br)); checkCuda(cudaFree(d_Bi)); checkCuda(cudaFree(d_Zr)); checkCuda(cudaFree(d_Zi)); checkCuda(cudaFree(d_P)); checkCuda(cudaFree(d_Ar)); checkCuda(cudaFree(d_Ai)); checkCuda(cudaFree(d_grad)); }
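A minimal host-side sketch of the quantity the reductions above accumulate (illustrative only; entropy_reference, Zr, Zi and N are assumed names, not part of grad_h). Ez is the sum of |Z_n|^2, acc is the sum of |Z_n|^2 * log(|Z_n|^2), and the reported value is (acc - Ez*log(Ez)) / Ez, the same form as the device entropy(acc, Ez) helper.

#include <cmath>
#include <cstddef>

// Hypothetical CPU reference; assumes every |Z_n|^2 is strictly positive,
// mirroring the asserts in the kernels above.
double entropy_reference(const double *Zr, const double *Zi, std::size_t N) {
    double Ez = 0.0, acc = 0.0;
    for (std::size_t n = 0; n < N; ++n) {
        const double mag = Zr[n] * Zr[n] + Zi[n] * Zi[n];  // |Z_n|^2
        Ez  += mag;
        acc += mag * std::log(mag);
    }
    return (acc - Ez * std::log(Ez)) / Ez;  // same form as the device entropy(acc, Ez)
}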
02815bc431fd232b20ca734c417c161d1b201383.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/local_response_normalization_op.h" namespace caffe2 { namespace { template <typename T> __global__ void LRNFillScaleNCHW(const int nthreads, const T* in, const int channels, const int height, const int width, const int size, const T alpha_over_size, const T bias, T* scale) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local offset const int w = index % width; const int h = (index / width) % height; const int n = index / width / height; const int offset = (n * channels * height + h) * width + w; const int step = height * width; in += offset; scale += offset; int head = 0; const int pre_pad = (size - 1) / 2; const int post_pad = size - pre_pad - 1; T accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad) { accum_scale += in[head * step] * in[head * step]; ++head; } // until we reach size, nothing needs to be subtracted while (head < size) { accum_scale += in[head * step] * in[head * step]; scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size; ++head; } // both add and subtract while (head < channels) { accum_scale += in[head * step] * in[head * step]; accum_scale -= in[(head - size) * step] * in[(head - size) * step]; scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { accum_scale -= in[(head - size) * step] * in[(head - size) * step]; scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size; ++head; } // recover the pointers for the next loop. in -= offset; scale -= offset; } } template <typename T> __global__ void LRNFillScaleNHWC(const int nthreads, const T *const in, const int height, const int width, const int channels, const int size, const T alpha_over_size, const T bias, T* scale) { CUDA_1D_KERNEL_LOOP(index, nthreads) { const int c = index % channels; const int pre_pad = (size - 1) / 2; scale[index] = 0; for (int i = 0; i < size; ++i) { const int raw_idx = c + i - pre_pad; if (raw_idx >= 0 && raw_idx < channels) { scale[index] += in[index + i - pre_pad] * in[index + i - pre_pad]; } } scale[index] = bias + scale[index] * alpha_over_size; } } // TODO(Yangqing): check if it would be faster to just put it into the previous // kernel. 
template <typename T> __global__ void LRNComputeOutput(const int nthreads, const T* in, const T* scale, const T negative_beta, T* out) { CUDA_1D_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } } template <typename T> __global__ void LRNComputeDiffNCHW(const int nthreads, const T* bottom_data, const T* top_data, const T* scale, const T* top_diff, const int channels, const int height, const int width, const int size, const T negative_beta, const T cache_ratio, T* bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local offset const int w = index % width; const int h = (index / width) % height; const int n = index / width / height; const int offset = (n * channels * height + h) * width + w; const int step = height * width; bottom_data += offset; top_data += offset; scale += offset; top_diff += offset; bottom_diff += offset; int head = 0; int pre_pad = size - (size + 1) / 2; int post_pad = size - pre_pad - 1; T accum_ratio = 0; // accumulate values while (head < post_pad) { accum_ratio += top_diff[head * step] * top_data[head * step] / scale[head * step]; ++head; } // until we reach size, nothing needs to be subtracted while (head < size) { accum_ratio += top_diff[head * step] * top_data[head * step] / scale[head * step]; bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } // both add and subtract while (head < channels) { accum_ratio += top_diff[head * step] * top_data[head * step] / scale[head * step]; accum_ratio -= top_diff[(head - size) * step] * top_data[(head - size) * step] / scale[(head - size) * step]; bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { accum_ratio -= top_diff[(head - size) * step] * top_data[(head - size) * step] / scale[(head - size) * step]; bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } // recover pointer for next iteration. bottom_data -= offset; top_data -= offset; scale -= offset; top_diff -= offset; bottom_diff -= offset; } } // This local response normalization gradient does one sum per output location // and does not use the running trick for 1-d convolution: thus it might not be // the fastest implementation. 
template <typename T> __global__ void LRNComputeDiffNHWC(const int nthreads, const T* bottom_data, const T* top_data, const T* scale, const T* top_diff, const int height, const int width, const int channels, const int size, const T negative_beta, const T cache_ratio, T* bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local channel offset const int c = index % channels; const int pre_pad = size / 2; T accum_ratio = 0; for (int i = -pre_pad; i < size - pre_pad; ++i) { if (c + i >= 0 && c + i < channels) { accum_ratio += top_diff[index + i] * top_data[index + i] / scale[index + i]; } } bottom_diff[index] = top_diff[index] * pow(scale[index], negative_beta) - cache_ratio * bottom_data[index] * accum_ratio; } } } // namespace template<> bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); DCHECK_EQ(X.dim(), 4); const int N = X.dim32(0); const int C = X.dim32(1); const int H = X.dim32(2); const int W = X.dim32(3); const float* Xdata = X.data<float>(); auto* Y = Output(0, X.sizes(), at::dtype<float>()); float* Ydata = Y->template mutable_data<float>(); if (OutputSize() > 1) { scale_ = Output(1); } else { if (!scale_) { scale_ = &local_scale_tensor_; } } scale_->ResizeLike(X); float* scale_data = scale_->template mutable_data<float>(); int n_threads = N * H * W; hipLaunchKernelGGL(( LRNFillScaleNCHW<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), n_threads, Xdata, C, H, W, size_, alpha_ / size_, bias_, scale_data); C10_HIP_KERNEL_LAUNCH_CHECK(); n_threads = X.numel(); hipLaunchKernelGGL(( LRNComputeOutput<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), n_threads, Xdata, scale_data, -beta_, Ydata); C10_HIP_KERNEL_LAUNCH_CHECK(); return true; } template<> bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); DCHECK_EQ(X.dim(), 4); const int N = X.dim32(0); const int H = X.dim32(1); const int W = X.dim32(2); const int C = X.dim32(3); const float* Xdata = X.data<float>(); auto* Y = Output(0, X.sizes(), at::dtype<float>()); float* Ydata = Y->template mutable_data<float>(); if (OutputSize() > 1) { scale_ = Output(1); } else { if (!scale_) { scale_ = &local_scale_tensor_; } } scale_->ResizeLike(X); float* scale_data = scale_->template mutable_data<float>(); int n_threads = X.numel(); hipLaunchKernelGGL(( LRNFillScaleNHWC<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), n_threads, Xdata, H, W, C, size_, alpha_ / size_, bias_, scale_data); C10_HIP_KERNEL_LAUNCH_CHECK(); hipLaunchKernelGGL(( LRNComputeOutput<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), n_threads, Xdata, scale_data, -beta_, Ydata); C10_HIP_KERNEL_LAUNCH_CHECK(); return true; } template <> bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); DCHECK_EQ(X.dim(), 4); const int N = X.dim32(0); const int C = X.dim32(1); const int H = X.dim32(2); const int W = X.dim32(3); // Loosely checking the size, assuming that the shapes will be the same as // long as the sizes check out. 
DCHECK_EQ(X.numel(), Y.numel()); DCHECK_EQ(X.numel(), dY.numel()); auto* dX = Output(0, X.sizes(), at::dtype<float>()); const float* Xdata = X.data<float>(); const float* Ydata = Y.data<float>(); if (!scale_) { scale_ = &local_scale_tensor_; } scale_->ResizeLike(X); float *const scale_data = scale_->template mutable_data<float>(); const int n_threads = N * H * W; hipLaunchKernelGGL(( LRNFillScaleNCHW<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), n_threads, Xdata, C, H, W, size_, alpha_ / size_, bias_, scale_data); C10_HIP_KERNEL_LAUNCH_CHECK(); const float *const dYdata = dY.data<float>(); float *const dXdata = dX->template mutable_data<float>(); hipLaunchKernelGGL(( LRNComputeDiffNCHW<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), n_threads, Xdata, Ydata, scale_data, dYdata, C, H, W, size_, -beta_, 2.f * alpha_ * beta_ / size_, dXdata); C10_HIP_KERNEL_LAUNCH_CHECK(); return true; } template <> bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); DCHECK_EQ(X.dim(), 4); const int N = X.dim32(0); const int H = X.dim32(1); const int W = X.dim32(2); const int C = X.dim32(3); const float* Xdata = X.data<float>(); // Loosely checking the size, assuming that the shapes will be the same as // long as the sizes check out. DCHECK_EQ(X.numel(), Y.numel()); DCHECK_EQ(X.numel(), dY.numel()); auto* dX = Output(0, X.sizes(), at::dtype<float>()); if (!scale_) { scale_ = &local_scale_tensor_; } scale_->ResizeLike(X); float* scale_data = scale_->template mutable_data<float>(); int n_threads = X.numel(); hipLaunchKernelGGL(( LRNFillScaleNHWC<float>), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), n_threads, Xdata, H, W, C, size_, alpha_ / size_, bias_, scale_data); C10_HIP_KERNEL_LAUNCH_CHECK(); hipLaunchKernelGGL(( LRNComputeDiffNHWC<float>) , dim3(CAFFE_GET_BLOCKS(X.numel())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.numel(), X.data<float>(), Y.data<float>(), scale_data, dY.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), size_, -beta_, 2.f * alpha_ * beta_ / size_, dX->template mutable_data<float>()); C10_HIP_KERNEL_LAUNCH_CHECK(); return true; } REGISTER_CUDA_OPERATOR(LRN, LRNOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(LRNGradient, LRNGradientOp<float, CUDAContext>); } // namespace caffe2
02815bc431fd232b20ca734c417c161d1b201383.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/local_response_normalization_op.h" namespace caffe2 { namespace { template <typename T> __global__ void LRNFillScaleNCHW(const int nthreads, const T* in, const int channels, const int height, const int width, const int size, const T alpha_over_size, const T bias, T* scale) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local offset const int w = index % width; const int h = (index / width) % height; const int n = index / width / height; const int offset = (n * channels * height + h) * width + w; const int step = height * width; in += offset; scale += offset; int head = 0; const int pre_pad = (size - 1) / 2; const int post_pad = size - pre_pad - 1; T accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad) { accum_scale += in[head * step] * in[head * step]; ++head; } // until we reach size, nothing needs to be subtracted while (head < size) { accum_scale += in[head * step] * in[head * step]; scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size; ++head; } // both add and subtract while (head < channels) { accum_scale += in[head * step] * in[head * step]; accum_scale -= in[(head - size) * step] * in[(head - size) * step]; scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { accum_scale -= in[(head - size) * step] * in[(head - size) * step]; scale[(head - post_pad) * step] = bias + accum_scale * alpha_over_size; ++head; } // recover the pointers for the next loop. in -= offset; scale -= offset; } } template <typename T> __global__ void LRNFillScaleNHWC(const int nthreads, const T *const in, const int height, const int width, const int channels, const int size, const T alpha_over_size, const T bias, T* scale) { CUDA_1D_KERNEL_LOOP(index, nthreads) { const int c = index % channels; const int pre_pad = (size - 1) / 2; scale[index] = 0; for (int i = 0; i < size; ++i) { const int raw_idx = c + i - pre_pad; if (raw_idx >= 0 && raw_idx < channels) { scale[index] += in[index + i - pre_pad] * in[index + i - pre_pad]; } } scale[index] = bias + scale[index] * alpha_over_size; } } // TODO(Yangqing): check if it would be faster to just put it into the previous // kernel. 
template <typename T> __global__ void LRNComputeOutput(const int nthreads, const T* in, const T* scale, const T negative_beta, T* out) { CUDA_1D_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } } template <typename T> __global__ void LRNComputeDiffNCHW(const int nthreads, const T* bottom_data, const T* top_data, const T* scale, const T* top_diff, const int channels, const int height, const int width, const int size, const T negative_beta, const T cache_ratio, T* bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local offset const int w = index % width; const int h = (index / width) % height; const int n = index / width / height; const int offset = (n * channels * height + h) * width + w; const int step = height * width; bottom_data += offset; top_data += offset; scale += offset; top_diff += offset; bottom_diff += offset; int head = 0; int pre_pad = size - (size + 1) / 2; int post_pad = size - pre_pad - 1; T accum_ratio = 0; // accumulate values while (head < post_pad) { accum_ratio += top_diff[head * step] * top_data[head * step] / scale[head * step]; ++head; } // until we reach size, nothing needs to be subtracted while (head < size) { accum_ratio += top_diff[head * step] * top_data[head * step] / scale[head * step]; bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } // both add and subtract while (head < channels) { accum_ratio += top_diff[head * step] * top_data[head * step] / scale[head * step]; accum_ratio -= top_diff[(head - size) * step] * top_data[(head - size) * step] / scale[(head - size) * step]; bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { accum_ratio -= top_diff[(head - size) * step] * top_data[(head - size) * step] / scale[(head - size) * step]; bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step] * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_data[(head - post_pad) * step] * accum_ratio; ++head; } // recover pointer for next iteration. bottom_data -= offset; top_data -= offset; scale -= offset; top_diff -= offset; bottom_diff -= offset; } } // This local response normalization gradient does one sum per output location // and does not use the running trick for 1-d convolution: thus it might not be // the fastest implementation. 
template <typename T> __global__ void LRNComputeDiffNHWC(const int nthreads, const T* bottom_data, const T* top_data, const T* scale, const T* top_diff, const int height, const int width, const int channels, const int size, const T negative_beta, const T cache_ratio, T* bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local channel offset const int c = index % channels; const int pre_pad = size / 2; T accum_ratio = 0; for (int i = -pre_pad; i < size - pre_pad; ++i) { if (c + i >= 0 && c + i < channels) { accum_ratio += top_diff[index + i] * top_data[index + i] / scale[index + i]; } } bottom_diff[index] = top_diff[index] * pow(scale[index], negative_beta) - cache_ratio * bottom_data[index] * accum_ratio; } } } // namespace template<> bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); DCHECK_EQ(X.dim(), 4); const int N = X.dim32(0); const int C = X.dim32(1); const int H = X.dim32(2); const int W = X.dim32(3); const float* Xdata = X.data<float>(); auto* Y = Output(0, X.sizes(), at::dtype<float>()); float* Ydata = Y->template mutable_data<float>(); if (OutputSize() > 1) { scale_ = Output(1); } else { if (!scale_) { scale_ = &local_scale_tensor_; } } scale_->ResizeLike(X); float* scale_data = scale_->template mutable_data<float>(); int n_threads = N * H * W; LRNFillScaleNCHW<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( n_threads, Xdata, C, H, W, size_, alpha_ / size_, bias_, scale_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); n_threads = X.numel(); LRNComputeOutput<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( n_threads, Xdata, scale_data, -beta_, Ydata); C10_CUDA_KERNEL_LAUNCH_CHECK(); return true; } template<> bool LRNOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); DCHECK_EQ(X.dim(), 4); const int N = X.dim32(0); const int H = X.dim32(1); const int W = X.dim32(2); const int C = X.dim32(3); const float* Xdata = X.data<float>(); auto* Y = Output(0, X.sizes(), at::dtype<float>()); float* Ydata = Y->template mutable_data<float>(); if (OutputSize() > 1) { scale_ = Output(1); } else { if (!scale_) { scale_ = &local_scale_tensor_; } } scale_->ResizeLike(X); float* scale_data = scale_->template mutable_data<float>(); int n_threads = X.numel(); LRNFillScaleNHWC<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( n_threads, Xdata, H, W, C, size_, alpha_ / size_, bias_, scale_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); LRNComputeOutput<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( n_threads, Xdata, scale_data, -beta_, Ydata); C10_CUDA_KERNEL_LAUNCH_CHECK(); return true; } template <> bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); DCHECK_EQ(X.dim(), 4); const int N = X.dim32(0); const int C = X.dim32(1); const int H = X.dim32(2); const int W = X.dim32(3); // Loosely checking the size, assuming that the shapes will be the same as // long as the sizes check out. 
DCHECK_EQ(X.numel(), Y.numel()); DCHECK_EQ(X.numel(), dY.numel()); auto* dX = Output(0, X.sizes(), at::dtype<float>()); const float* Xdata = X.data<float>(); const float* Ydata = Y.data<float>(); if (!scale_) { scale_ = &local_scale_tensor_; } scale_->ResizeLike(X); float *const scale_data = scale_->template mutable_data<float>(); const int n_threads = N * H * W; LRNFillScaleNCHW<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( n_threads, Xdata, C, H, W, size_, alpha_ / size_, bias_, scale_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); const float *const dYdata = dY.data<float>(); float *const dXdata = dX->template mutable_data<float>(); LRNComputeDiffNCHW<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( n_threads, Xdata, Ydata, scale_data, dYdata, C, H, W, size_, -beta_, 2.f * alpha_ * beta_ / size_, dXdata); C10_CUDA_KERNEL_LAUNCH_CHECK(); return true; } template <> bool LRNGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); DCHECK_EQ(X.dim(), 4); const int N = X.dim32(0); const int H = X.dim32(1); const int W = X.dim32(2); const int C = X.dim32(3); const float* Xdata = X.data<float>(); // Loosely checking the size, assuming that the shapes will be the same as // long as the sizes check out. DCHECK_EQ(X.numel(), Y.numel()); DCHECK_EQ(X.numel(), dY.numel()); auto* dX = Output(0, X.sizes(), at::dtype<float>()); if (!scale_) { scale_ = &local_scale_tensor_; } scale_->ResizeLike(X); float* scale_data = scale_->template mutable_data<float>(); int n_threads = X.numel(); LRNFillScaleNHWC<float><<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( n_threads, Xdata, H, W, C, size_, alpha_ / size_, bias_, scale_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); LRNComputeDiffNHWC<float> <<<CAFFE_GET_BLOCKS(X.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.numel(), X.data<float>(), Y.data<float>(), scale_data, dY.data<float>(), X.dim32(1), X.dim32(2), X.dim32(3), size_, -beta_, 2.f * alpha_ * beta_ / size_, dX->template mutable_data<float>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); return true; } REGISTER_CUDA_OPERATOR(LRN, LRNOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(LRNGradient, LRNGradientOp<float, CUDAContext>); } // namespace caffe2
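A short CPU sketch of the forward LRN arithmetic implemented by LRNFillScaleNHWC and LRNComputeOutput for a single pixel's channel vector (illustrative only; lrn_forward_reference and its parameters are assumed names, not Caffe2 API): scale[c] = bias + (alpha/size) * sum of x^2 over a window of size channels around c, and y[c] = x[c] * scale[c]^(-beta).

#include <cmath>
#include <vector>

// Hypothetical helper: x holds the C channel values of one (n, h, w) position.
std::vector<float> lrn_forward_reference(const std::vector<float>& x, int size,
                                         float alpha, float beta, float bias) {
    const int C = static_cast<int>(x.size());
    const int pre_pad = (size - 1) / 2;
    std::vector<float> y(C);
    for (int c = 0; c < C; ++c) {
        float acc = 0.f;
        for (int i = 0; i < size; ++i) {
            const int cc = c + i - pre_pad;               // channel window around c
            if (cc >= 0 && cc < C) acc += x[cc] * x[cc];
        }
        const float scale = bias + acc * (alpha / size);  // as in LRNFillScaleNHWC
        y[c] = x[c] * std::pow(scale, -beta);             // as in LRNComputeOutput
    }
    return y;
}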
7bcea0400fbb40b18fe270aa398d140e6c94f82e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel3_plus_4_b; int xdim0_update_halo_kernel3_plus_4_b_h = -1; __constant__ int ydim0_update_halo_kernel3_plus_4_b; int ydim0_update_halo_kernel3_plus_4_b_h = -1; __constant__ int xdim1_update_halo_kernel3_plus_4_b; int xdim1_update_halo_kernel3_plus_4_b_h = -1; __constant__ int ydim1_update_halo_kernel3_plus_4_b; int ydim1_update_halo_kernel3_plus_4_b_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel3_plus_4_b*(y)+xdim0_update_halo_kernel3_plus_4_b*ydim0_update_halo_kernel3_plus_4_b*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel3_plus_4_b*(y)+xdim1_update_halo_kernel3_plus_4_b*ydim1_update_halo_kernel3_plus_4_b*(z)) //user function __device__ inline void update_halo_kernel3_plus_4_b(double *vol_flux_x, double *mass_flux_x, const int* fields) { if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0,0,0)] = vol_flux_x[OPS_ACC0(0,-4,0)]; if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0,0,0)] = mass_flux_x[OPS_ACC1(0,-4,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel3_plus_4_b( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel3_plus_4_b + idx_z * 1 * xdim0_update_halo_kernel3_plus_4_b * ydim0_update_halo_kernel3_plus_4_b; arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel3_plus_4_b + idx_z * 1 * xdim1_update_halo_kernel3_plus_4_b * ydim1_update_halo_kernel3_plus_4_b; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel3_plus_4_b(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel3_plus_4_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_arg args[3] = { arg0, arg1, arg2}; ops_timing_realloc(91,"update_halo_kernel3_plus_4_b"); OPS_kernels[91].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_update_halo_kernel3_plus_4_b_h || ydim0 != ydim0_update_halo_kernel3_plus_4_b_h || xdim1 != xdim1_update_halo_kernel3_plus_4_b_h || ydim1 != ydim1_update_halo_kernel3_plus_4_b_h) { hipMemcpyToSymbol( 
xdim0_update_halo_kernel3_plus_4_b, &xdim0, sizeof(int) ); xdim0_update_halo_kernel3_plus_4_b_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel3_plus_4_b, &ydim0, sizeof(int) ); ydim0_update_halo_kernel3_plus_4_b_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel3_plus_4_b, &xdim1, sizeof(int) ); xdim1_update_halo_kernel3_plus_4_b_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel3_plus_4_b, &ydim1, sizeof(int) ); ydim1_update_halo_kernel3_plus_4_b_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); ops_timers_core(&c1,&t1); OPS_kernels[91].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel3_plus_4_b), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[91].time += t2-t1; ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[91].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[91].transfer += ops_compute_transfer(dim, range, &arg1); }
7bcea0400fbb40b18fe270aa398d140e6c94f82e.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel3_plus_4_b; int xdim0_update_halo_kernel3_plus_4_b_h = -1; __constant__ int ydim0_update_halo_kernel3_plus_4_b; int ydim0_update_halo_kernel3_plus_4_b_h = -1; __constant__ int xdim1_update_halo_kernel3_plus_4_b; int xdim1_update_halo_kernel3_plus_4_b_h = -1; __constant__ int ydim1_update_halo_kernel3_plus_4_b; int ydim1_update_halo_kernel3_plus_4_b_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel3_plus_4_b*(y)+xdim0_update_halo_kernel3_plus_4_b*ydim0_update_halo_kernel3_plus_4_b*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel3_plus_4_b*(y)+xdim1_update_halo_kernel3_plus_4_b*ydim1_update_halo_kernel3_plus_4_b*(z)) //user function __device__ inline void update_halo_kernel3_plus_4_b(double *vol_flux_x, double *mass_flux_x, const int* fields) { if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0,0,0)] = vol_flux_x[OPS_ACC0(0,-4,0)]; if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0,0,0)] = mass_flux_x[OPS_ACC1(0,-4,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel3_plus_4_b( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel3_plus_4_b + idx_z * 1 * xdim0_update_halo_kernel3_plus_4_b * ydim0_update_halo_kernel3_plus_4_b; arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel3_plus_4_b + idx_z * 1 * xdim1_update_halo_kernel3_plus_4_b * ydim1_update_halo_kernel3_plus_4_b; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel3_plus_4_b(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel3_plus_4_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_arg args[3] = { arg0, arg1, arg2}; ops_timing_realloc(91,"update_halo_kernel3_plus_4_b"); OPS_kernels[91].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_update_halo_kernel3_plus_4_b_h || ydim0 != ydim0_update_halo_kernel3_plus_4_b_h || xdim1 != xdim1_update_halo_kernel3_plus_4_b_h || ydim1 != ydim1_update_halo_kernel3_plus_4_b_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel3_plus_4_b, &xdim0, sizeof(int) ); 
xdim0_update_halo_kernel3_plus_4_b_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel3_plus_4_b, &ydim0, sizeof(int) ); ydim0_update_halo_kernel3_plus_4_b_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel3_plus_4_b, &xdim1, sizeof(int) ); xdim1_update_halo_kernel3_plus_4_b_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel3_plus_4_b, &ydim1, sizeof(int) ); ydim1_update_halo_kernel3_plus_4_b_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); ops_timers_core(&c1,&t1); OPS_kernels[91].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data ops_update_halo_kernel3_plus_4_b<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[91].time += t2-t1; ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[91].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[91].transfer += ops_compute_transfer(dim, range, &arg1); }
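A self-contained sketch of the one substantive rewrite visible in this pair (an assumed stand-alone example, unrelated to the OPS kernel above): hipcc accepts both the CUDA-style triple-chevron launch and the hipLaunchKernelGGL macro that hipify emits, where grid, block, dynamic shared-memory bytes and stream become explicit arguments.

#include <hip/hip_runtime.h>

__global__ void fill(int *out, int value, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = value;
}

int main() {
    const int n = 256;
    int *d = NULL;
    hipMalloc((void **)&d, n * sizeof(int));
    fill<<<dim3(1), dim3(n)>>>(d, 1, n);                        // chevron form
    hipLaunchKernelGGL(fill, dim3(1), dim3(n), 0, 0, d, 2, n);  // macro form emitted by hipify
    hipDeviceSynchronize();
    hipFree(d);
    return 0;
}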
c2de4dba3b872c3698f68977de9e0ed4bdc62a9a.hip
// !!! This is a file automatically generated by hipify!!! #include "ProdThrustFunctor.hh" __device__ fptype device_ProdPdfs (fptype* evt, fptype* p, unsigned int* indices) { // Index structure is nP | F1 P1 | F2 P2 | ... // where nP is number of parameters, Fs are function indices, and Ps are parameter indices int numParams = indices[0]; fptype ret = 1; for (int i = 1; i < numParams; i += 2) { int fcnIdx = indices[i + 0]; int parIdx = indices[i + 1]; fptype curr = (*(reinterpret_cast<device_function_ptr>(device_function_table[fcnIdx])))(evt, p, paramIndices + parIdx); curr *= normalisationFactors[parIdx]; ret *= curr; //if ((0 == threadIdx.x) && (0 == blockIdx.x) && (gpuDebug & 1) && (paramIndices + debugParamIndex == indices)) //if ((1 > (int) floor(0.5 + evt[8])) && (gpuDebug & 1) && (paramIndices + debugParamIndex == indices)) //if (0.0001 < ret) //if ((gpuDebug & 1) && (isnan(curr))) //if (gpuDebug & 1) //if ((gpuDebug & 1) && (0 == threadIdx.x)) //printf("device_Prod: (%f %f %f %f %f) %f %f %f %i %i %i\n", evt[0], evt[1], evt[2], evt[3], evt[4], curr, ret, normalisationFactors[parIdx], i, parIdx, numParams); //printf("(%i, %i) device_Prod: (%f %f %f %f) %f %f %f %i\n", blockIdx.x, threadIdx.x, evt[0], evt[8], evt[6], evt[7], curr, ret, normalisationFactors[parIdx], i); //printf("(%i, %i) device_Prod: (%f %f) %f %f %f %i\n", blockIdx.x, threadIdx.x, evt[0], evt[1], curr, ret, normalisationFactors[parIdx], i); } return ret; } __device__ device_function_ptr ptr_to_ProdPdfs = device_ProdPdfs; ProdThrustFunctor::ProdThrustFunctor (std::string n, std::vector<FunctorBase*> comps) : ThrustPdfFunctor(0, n) , varOverlaps(false) { std::vector<unsigned int> pindices; for (std::vector<FunctorBase*>::iterator p = comps.begin(); p != comps.end(); ++p) { assert(*p); components.push_back(*p); } getObservables(observables); // Gathers from components FunctorBase::obsCont observableCheck; // Use to check for overlap in observables // Indices stores (function index)(function parameter index)(variable index) for each component. for (std::vector<FunctorBase*>::iterator p = comps.begin(); p != comps.end(); ++p) { pindices.push_back((*p)->getFunctionIndex()); pindices.push_back((*p)->getParameterIndex()); if (varOverlaps) continue; // Only need to establish this once. FunctorBase::obsCont currObses; (*p)->getObservables(currObses); for (FunctorBase::obsIter o = currObses.begin(); o != currObses.end(); ++o) { if (find(observableCheck.begin(), observableCheck.end(), (*o)) == observableCheck.end()) continue; varOverlaps = true; break; } (*p)->getObservables(observableCheck); } if (varOverlaps) { // Check for components forcing separate normalisation for (std::vector<FunctorBase*>::iterator p = comps.begin(); p != comps.end(); ++p) { if ((*p)->getSpecialMask() & FunctorBase::ForceSeparateNorm) varOverlaps = false; } } hipMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_ProdPdfs, sizeof(void*)); initialise(pindices); } __host__ fptype ProdThrustFunctor::normalise () const { if (varOverlaps) { // Two or more components share an observable and cannot be separately // normalised, since \int A*B dx does not equal int A dx * int B dx. recursiveSetNormalisation(fptype(1.0)); hipMemcpyToSymbol(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, hipMemcpyHostToDevice); // Normalise numerically. 
//std::cout << "Numerical normalisation of " << getName() << " due to varOverlaps.\n"; fptype ret = ThrustPdfFunctor::normalise(); //if (cpuDebug & 1) //std::cout << "ProdThrustFunctor " << getName() << " has normalisation " << ret << " " << host_callnumber << std::endl; return ret; } // Normalise components individually for (std::vector<FunctorBase*>::const_iterator c = components.begin(); c != components.end(); ++c) { (*c)->normalise(); } host_normalisation[parameters] = 1; hipMemcpyToSymbol(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, hipMemcpyHostToDevice); return 1.0; }
c2de4dba3b872c3698f68977de9e0ed4bdc62a9a.cu
#include "ProdThrustFunctor.hh" __device__ fptype device_ProdPdfs (fptype* evt, fptype* p, unsigned int* indices) { // Index structure is nP | F1 P1 | F2 P2 | ... // where nP is number of parameters, Fs are function indices, and Ps are parameter indices int numParams = indices[0]; fptype ret = 1; for (int i = 1; i < numParams; i += 2) { int fcnIdx = indices[i + 0]; int parIdx = indices[i + 1]; fptype curr = (*(reinterpret_cast<device_function_ptr>(device_function_table[fcnIdx])))(evt, p, paramIndices + parIdx); curr *= normalisationFactors[parIdx]; ret *= curr; //if ((0 == threadIdx.x) && (0 == blockIdx.x) && (gpuDebug & 1) && (paramIndices + debugParamIndex == indices)) //if ((1 > (int) floor(0.5 + evt[8])) && (gpuDebug & 1) && (paramIndices + debugParamIndex == indices)) //if (0.0001 < ret) //if ((gpuDebug & 1) && (isnan(curr))) //if (gpuDebug & 1) //if ((gpuDebug & 1) && (0 == threadIdx.x)) //printf("device_Prod: (%f %f %f %f %f) %f %f %f %i %i %i\n", evt[0], evt[1], evt[2], evt[3], evt[4], curr, ret, normalisationFactors[parIdx], i, parIdx, numParams); //printf("(%i, %i) device_Prod: (%f %f %f %f) %f %f %f %i\n", blockIdx.x, threadIdx.x, evt[0], evt[8], evt[6], evt[7], curr, ret, normalisationFactors[parIdx], i); //printf("(%i, %i) device_Prod: (%f %f) %f %f %f %i\n", blockIdx.x, threadIdx.x, evt[0], evt[1], curr, ret, normalisationFactors[parIdx], i); } return ret; } __device__ device_function_ptr ptr_to_ProdPdfs = device_ProdPdfs; ProdThrustFunctor::ProdThrustFunctor (std::string n, std::vector<FunctorBase*> comps) : ThrustPdfFunctor(0, n) , varOverlaps(false) { std::vector<unsigned int> pindices; for (std::vector<FunctorBase*>::iterator p = comps.begin(); p != comps.end(); ++p) { assert(*p); components.push_back(*p); } getObservables(observables); // Gathers from components FunctorBase::obsCont observableCheck; // Use to check for overlap in observables // Indices stores (function index)(function parameter index)(variable index) for each component. for (std::vector<FunctorBase*>::iterator p = comps.begin(); p != comps.end(); ++p) { pindices.push_back((*p)->getFunctionIndex()); pindices.push_back((*p)->getParameterIndex()); if (varOverlaps) continue; // Only need to establish this once. FunctorBase::obsCont currObses; (*p)->getObservables(currObses); for (FunctorBase::obsIter o = currObses.begin(); o != currObses.end(); ++o) { if (find(observableCheck.begin(), observableCheck.end(), (*o)) == observableCheck.end()) continue; varOverlaps = true; break; } (*p)->getObservables(observableCheck); } if (varOverlaps) { // Check for components forcing separate normalisation for (std::vector<FunctorBase*>::iterator p = comps.begin(); p != comps.end(); ++p) { if ((*p)->getSpecialMask() & FunctorBase::ForceSeparateNorm) varOverlaps = false; } } cudaMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_ProdPdfs, sizeof(void*)); initialise(pindices); } __host__ fptype ProdThrustFunctor::normalise () const { if (varOverlaps) { // Two or more components share an observable and cannot be separately // normalised, since \int A*B dx does not equal int A dx * int B dx. recursiveSetNormalisation(fptype(1.0)); cudaMemcpyToSymbol(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, cudaMemcpyHostToDevice); // Normalise numerically. 
//std::cout << "Numerical normalisation of " << getName() << " due to varOverlaps.\n"; fptype ret = ThrustPdfFunctor::normalise(); //if (cpuDebug & 1) //std::cout << "ProdThrustFunctor " << getName() << " has normalisation " << ret << " " << host_callnumber << std::endl; return ret; } // Normalise components individually for (std::vector<FunctorBase*>::const_iterator c = components.begin(); c != components.end(); ++c) { (*c)->normalise(); } host_normalisation[parameters] = 1; cudaMemcpyToSymbol(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, cudaMemcpyHostToDevice); return 1.0; }
1534216fe5707408073a3ed81f0c1571da7d8648.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stereo.h"

__global__ void LimitRangeKernel(float* src, float upperLimit, int width, int height, int stride, float *dst)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    const int pos = ix + iy * stride;

    if (ix >= width || iy >= height) return;

    /*if (src[pos] < 0.0f) {
        dst[pos] = 0.0f;
    }*/
    if (src[pos] > upperLimit) {
        dst[pos] = upperLimit;
    }
    else {
        dst[pos] = src[pos];
    }
}

void Stereo::LimitRange(float *src, float upperLimit, int w, int h, int s, float *dst)
{
    dim3 threads(BlockWidth, BlockHeight);
    dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
    LimitRangeKernel<<<blocks, threads>>>(src, upperLimit, w, h, s, dst);
}
1534216fe5707408073a3ed81f0c1571da7d8648.cu
#include "stereo.h" __global__ void LimitRangeKernel(float* src, float upperLimit, int width, int height, int stride, float *dst) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; const int iy = threadIdx.y + blockIdx.y * blockDim.y; const int pos = ix + iy * stride; if (ix >= width || iy >= height) return; /*if (src[pos] < 0.0f) { dst[pos] = 0.0f; }*/ if (src[pos] > upperLimit) { dst[pos] = upperLimit; } else { dst[pos] = src[pos]; } } void Stereo::LimitRange(float *src, float upperLimit, int w, int h, int s, float *dst) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y)); LimitRangeKernel << <blocks, threads >> > (src, upperLimit, w, h, s, dst); }
e3547d7780eed3a600391c7c69b039f78e80ec9d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /************************************************************************* GPU Version: Tsinghua University, Aug. 2012. Written by Yun Fei in collaboration with W. Wang and B. Wang Original: Optimization Technology Center. Argonne National Laboratory and Northwestern University. Written by Ciyou Zhu in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. Contributors: * Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to pseudocode. This software is freely available, but we expect that all publications describing work using this software, or all commercial products using it, quote at least one of the references given below: * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound Constrained Optimization, (1995), SIAM Journal on Scientific and Statistical Computing , 16, 5, pp. 1190-1208. * C. Zhu, R.H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization (1997), ACM Transactions on Mathematical Software, Vol 23, Num. 4, pp. 550 - 560. *************************************************************************/ #include "lbfgsbcuda.h" namespace lbfgsbcuda { namespace subsm { template<int bx> __global__ void kernel00( const int nsub, const int* ind, const int head, const int m, const int col, const int iPitch_ws, const int oPitch, real* buf_array_p, const real* wy, const real* ws, const real* d, const real theta ) { const int j = blockIdx.x * blockDim.x + threadIdx.x; const int i = blockIdx.y; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; real mySum; if(j < nsub) { int pointr = Modular((head + i % col), m); const int k = ind[j]; if(i >= col) { mySum = ws[k * iPitch_ws + pointr] * theta; } else { mySum = wy[k * iPitch_ws + pointr]; } mySum *= d[j]; } else { mySum = 0; } sdata[tid] = mySum; __syncthreads(); if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile real* smem = sdata + tid; if(bx > 32) {*smem = mySum = mySum + smem[32];} if(bx > 16) {*smem = mySum = mySum + smem[16];} if(bx > 8) {*smem = mySum = mySum + smem[8];} if(bx > 4) {*smem = mySum = mySum + smem[4];} if(bx > 2) {*smem = mySum = mySum + smem[2];} if(bx > 1) {*smem = mySum = mySum + smem[1];} } if (tid == 0) buf_array_p[i * oPitch + blockIdx.x] = mySum; } template<int bx> __global__ void kernel01( const int n, const int iPitch, const int oPitch, const real* buf_in, real* buf_out) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; real mySum; if(i < n) mySum = buf_in[j * iPitch + i]; else mySum = 0; sdata[tid] = mySum; __syncthreads(); if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile real* smem = sdata + tid; if(bx > 32) {*smem = mySum = mySum + smem[32];} if(bx > 16) {*smem = mySum = mySum + smem[16];} if(bx > 8) {*smem = mySum = mySum + smem[8];} if(bx > 4) {*smem = mySum = mySum + smem[4];} if(bx > 2) {*smem = mySum = mySum + smem[2];} if(bx > 1) {*smem = mySum = mySum + smem[1];} } if(tid == 0) { buf_out[j * oPitch + blockIdx.x] = mySum; } } void prog0( const int n, const int* ind, const int head, const int m, const int col, const int iPitch_ws, real* buf_array_p, const real* wy, const real* ws, const real* d, real* wv, const real theta, const int iPitch_normal, const hipStream_t& stream ) { int nblock0 = n; int mi = log2Up(nblock0); int nblock1 = iDivUp2(nblock0, mi); real* output = (nblock1 == 1) ? wv : buf_array_p; int op20 = (nblock1 == 1) ? 1 : iPitch_normal; dynamicCall(kernel00, mi, nblock1, col * 2, stream, (n, ind, head, m, col, iPitch_ws, op20, output, wy, ws, d, theta)); nblock0 = nblock1; while(nblock0 > 1) { nblock1 = iDivUp2(nblock0, mi); real* input = output; output = (nblock1 == 1) ? wv : (output + nblock0); int op20 = (nblock1 == 1) ? 
1 : iPitch_normal; dynamicCall(kernel01, mi, nblock1, col * 2, stream, (nblock0, iPitch_normal, op20, input, output)); nblock0 = nblock1; } } __global__ void kernel1( real* wv) { const int i = threadIdx.x; wv[i] = -wv[i]; } void prog1( real* wn, int col, int iPitch_wn, real* wv, const hipStream_t& stream ) { int col2 = col * 2; lbfgsbcuda::CheckBuffer(wv, col * 2, col * 2); hipblasSetStream(cublasHd, stream); cublasRtrsv( cublasHd, HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT, col2, wn, iPitch_wn, wv, 1); lbfgsbcuda::CheckBuffer(wn, iPitch_wn, iPitch_wn * 7); lbfgsbcuda::CheckBuffer(wv, col * 2, col * 2); hipLaunchKernelGGL(( kernel1), dim3(1), dim3(col), 0, stream, wv); lbfgsbcuda::CheckBuffer(wv, col * 2, col * 2); cublasRtrsv(cublasHd, HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_T, HIPBLAS_DIAG_NON_UNIT, col2, wn, iPitch_wn, wv, 1); lbfgsbcuda::CheckBuffer(wv, col * 2, col * 2); hipblasSetStream(cublasHd, NULL); } template<int bsize> __global__ void kernel2( int nsub, const int* ind, const int col, const int head, const int m, const int iPitch, const real* wv, const real* wy, const real* ws, const real inv_theta, real* d ) { const int i = blockIdx.x * blockDim.y + threadIdx.y; const int tidx = threadIdx.x; //8 const int tidy = threadIdx.y; //64 volatile __shared__ real sdata[(512 / bsize)][bsize + 1]; __shared__ real a[2][bsize+1]; real mySum; if(tidy == 0 && tidx < col) { a[0][tidx] = wv[tidx] * inv_theta; a[1][tidx] = wv[col + tidx]; } if(i < nsub && tidx < col) { const int pointr = Modular((head + tidx), m); const int k = ind[i]; __syncthreads(); mySum = wy[k * iPitch + pointr] * a[0][tidx] + ws[k * iPitch + pointr] * a[1][tidx]; } else mySum = 0; if(bsize > 1) { volatile real* smem = sdata[tidy] + tidx; *smem = mySum; __syncthreads(); if(bsize > 4) {*smem = mySum = mySum + smem[4];} if(bsize > 2) {*smem = mySum = mySum + smem[2];} if(bsize > 1) {*smem = mySum = mySum + smem[1];} } if(tidx == 0 && i < nsub) { d[i] = (d[i] + mySum) * inv_theta; } } void prog2( const int nsub, const int* ind, const int col, const int head, const int m, const int iPitch, const real* wv, const real* wy, const real* ws, const real theta, real* d, const hipStream_t& stream ) { real invtheta = 1.0 / theta; if(col > 4) { int nblocky = 512 / 8; hipLaunchKernelGGL(( kernel2<8>), dim3(dim3(iDivUp(nsub, nblocky))), dim3(dim3(8, nblocky)), 0, stream, nsub, ind, col, head, m, iPitch, wv, wy, ws, invtheta, d); } else if(col > 2) { int nblocky = 512 / 4; hipLaunchKernelGGL(( kernel2<4>), dim3(dim3(iDivUp(nsub, nblocky))), dim3(dim3(4, nblocky)), 0, stream, nsub, ind, col, head, m, iPitch, wv, wy, ws, invtheta, d); } else if(col > 1) { int nblocky = 512 / 2; hipLaunchKernelGGL(( kernel2<2>), dim3(dim3(iDivUp(nsub, nblocky))), dim3(dim3(2, nblocky)), 0, stream, nsub, ind, col, head, m, iPitch, wv, wy, ws, invtheta, d); } else if(col == 1){ int nblocky = 512 / 1; hipLaunchKernelGGL(( kernel2<1>), dim3(dim3(iDivUp(nsub, nblocky))), dim3(dim3(1, nblocky)), 0, stream, nsub, ind, col, head, m, iPitch, wv, wy, ws, invtheta, d); } } __global__ void kernel210( int nsub, const int* ind, const real* d, real* x, const real* l, const real* u, const int* nbd) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= nsub) return; const int k = ind[i]; real xk = x[k] + d[i]; const int nbdk = nbd[k]; if(nbdk == 1) { xk = maxr(l[k], xk); } else if(nbdk == 2) { xk = maxr(l[k], xk); xk = minr(u[k], xk); } else if(nbdk == 3) { xk = minr(u[k], xk); } x[k] = xk; } template<int bx> __global__ void kernel211( const int n, real* 
buf_n_r, const real* x, const real* xx, const real* gg ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; real mySum; if(i < n) { mySum = (x[i] - xx[i]) * gg[i]; } else { mySum = 0; } sdata[tid] = mySum; __syncthreads(); if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile real* smem = sdata + tid; if(bx > 32) {*smem = mySum = mySum + smem[32];} if(bx > 16) {*smem = mySum = mySum + smem[16];} if(bx > 8) {*smem = mySum = mySum + smem[8];} if(bx > 4) {*smem = mySum = mySum + smem[4];} if(bx > 2) {*smem = mySum = mySum + smem[2];} if(bx > 1) {*smem = mySum = mySum + smem[1];} } if (tid == 0) buf_n_r[blockIdx.x] = mySum; } void prog21 ( int n, int nsub, const int* ind, const real* d, real* x, const real* l, const real* u, const int* nbd, const real* xx, const real* gg, real* buf_n_r, real* pddp, const hipStream_t& stream) { hipLaunchKernelGGL(( kernel210), dim3(iDivUp(n, 512)), dim3(512), 0, stream, nsub, ind, d, x, l, u, nbd); int nblock0 = n; int mi = log2Up(nblock0); int nblock1 = iDivUp2(nblock0, mi); real* output = (nblock1 == 1) ? pddp : buf_n_r; dynamicCall(kernel211, mi, nblock1, 1, stream, (n, output, x, xx, gg)); nblock0 = nblock1; while(nblock0 > 1) { nblock1 = iDivUp2(nblock0, mi); real* input = output; output = (nblock1 == 1) ? 
pddp : (output + nblock0); dynamicCall(kernel01, mi, nblock1, 1, stream, (nblock0, n, 1, input, output)); nblock0 = nblock1; } } __device__ inline void minex(volatile real& a, volatile real& b, volatile int& ia, volatile int& ib) { if(a > b) { ia = ib, a = b; } } template<int bx> __global__ void kernel30( const int nsub, const int* ind, real* d, const int* nbd, real* t, int* ti, real* x, const real* u, const real* l ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; volatile __shared__ int sdatai[bx]; real mySum = 1.0; if(i < nsub) { const int k = ind[i]; const int nbdi = nbd[k]; if(nbdi != 0) { real dk = d[i]; if( dk < 0 && nbdi <= 2 ) { real temp2 = l[k] - x[k]; if( temp2 >= 0 ) { mySum = 0; } else { mySum = minr(1.0, temp2 / dk); } } else if( dk > 0 && nbdi >= 2 ) { real temp2 = u[k] - x[k]; if( temp2 <= 0 ) { mySum = 0; } else { mySum = minr(1.0, temp2 / dk); } } } } sdata[tid] = mySum; sdatai[tid] = i; __syncthreads(); t[i] = mySum; ti[i] = i; if(bx > 512) {if (tid < 512) { minex(sdata[tid], sdata[tid + 512], sdatai[tid], sdatai[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { minex(sdata[tid], sdata[tid + 256], sdatai[tid], sdatai[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { minex(sdata[tid], sdata[tid + 128], sdatai[tid], sdatai[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { minex(sdata[tid], sdata[tid + 64], sdatai[tid], sdatai[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile real* smem = sdata + tid; volatile int* smemi = sdatai + tid; if(bx > 32) {minex(*smem, smem[32], *smemi, smemi[32]);} if(bx > 16) {minex(*smem, smem[16], *smemi, smemi[16]);} if(bx > 8) {minex(*smem, smem[8], *smemi, smemi[8]);} if(bx > 4) {minex(*smem, smem[4], *smemi, smemi[4]);} if(bx > 2) {minex(*smem, smem[2], *smemi, smemi[2]);} if(bx > 1) {minex(*smem, smem[1], *smemi, smemi[1]);} if (tid == 0) { t[blockIdx.x] = *smem; ti[blockIdx.x] = *smemi; if(gridDim.x == 1 && *smem < 1) { real dk = d[*smemi]; const int k = ind[*smemi]; if(dk > 0) { x[k] = u[k]; d[*smemi] = 0; } else if(dk < 0) { x[k] = l[k]; d[*smemi] = 0; } } } } } template<int bx> __global__ void kernel31( const int n, const int* ind, const real* buf_in, const int* bufi_in, real* buf_out, int* bufi_out, real* d, real* x, const real* u, const real* l ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; volatile __shared__ int sdatai[bx]; real mySum; int mySumi; if(i < n) { mySum = buf_in[i]; mySumi = bufi_in[i]; } else { mySum = 1.0; mySumi = 0; } sdata[tid] = mySum; sdatai[tid] = mySumi; __syncthreads(); if(bx > 512) {if (tid < 512) { minex(sdata[tid], sdata[tid + 512], sdatai[tid], sdatai[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { minex(sdata[tid], sdata[tid + 256], sdatai[tid], sdatai[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { minex(sdata[tid], sdata[tid + 128], sdatai[tid], sdatai[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { minex(sdata[tid], sdata[tid + 64], sdatai[tid], sdatai[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't 
reorder stores to it and induce incorrect behavior. volatile real* smem = sdata + tid; volatile int* smemi = sdatai + tid; if(bx > 32) {minex(*smem, smem[32], *smemi, smemi[32]);} if(bx > 16) {minex(*smem, smem[16], *smemi, smemi[16]);} if(bx > 8) {minex(*smem, smem[8], *smemi, smemi[8]);} if(bx > 4) {minex(*smem, smem[4], *smemi, smemi[4]);} if(bx > 2) {minex(*smem, smem[2], *smemi, smemi[2]);} if(bx > 1) {minex(*smem, smem[1], *smemi, smemi[1]);} if (tid == 0) { buf_out[blockIdx.x] = *smem; bufi_out[blockIdx.x] = *smemi; if(gridDim.x == 1 && *smem < 1) { real dk = d[*smemi]; const int k = ind[*smemi]; if(dk > 0) { x[k] = u[k]; d[*smemi] = 0; } else if(dk < 0) { x[k] = l[k]; d[*smemi] = 0; } } } } } __global__ void kernel32( const int nsub, const int* ind, real* x, const real* d, const real* alpha ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; __shared__ real salpha[1]; if(i >= nsub) return; const int k = ind[i]; if(threadIdx.x == 0) { *salpha = alpha[0]; } real xi = x[k]; real di = d[i]; __syncthreads(); x[k] = salpha[0] * di + xi; } void prog3 ( const int nsub, const int* ind, real* d, const int* nbd, real* buf_s_r, int* bufi_s_r, real* x, const real* u, const real* l, const hipStream_t& stream ) { //kernel30(nsub, d, nbd, buf_s_r, bufi_s_r, x, u, l, alpha); int nblock0 = nsub; int mi = log2Up(nblock0); int nblock1 = iDivUp2(nblock0, mi); real* output_r = buf_s_r; int* output_i = bufi_s_r; dynamicCall(kernel30, mi, nblock1, 1, stream, (nsub, ind, d, nbd, output_r, output_i, x, u, l)); /* kernel30<<<dim3(nblock1), dim3(512)>>> (nsub, d, nbd, output_r, output_i, x, u, l);*/ CheckBuffer_int(output_i, nsub, nsub); CheckBuffer(output_r, nsub, nsub); nblock0 = nblock1; while(nblock0 > 1) { nblock1 = iDivUp2(nblock0, mi); real* input_r = output_r; int* input_i = output_i; output_r = output_r + nblock0; output_i = output_i + nblock0; dynamicCall(kernel31, mi, nblock1, 1, stream, (nblock0, ind, input_r, input_i, output_r, output_i, d, x, u, l)); /* kernel31<<<dim3(nblock1), dim3(512)>>> (nblock0, input_r, input_i, output_r, output_i, d, x, u, l);*/ nblock0 = nblock1; } hipLaunchKernelGGL(( kernel32), dim3(dim3(iDivUp(nsub, 512))), dim3(dim3(512)), 0, stream, nsub, ind, x, d, output_r); } }; };
e3547d7780eed3a600391c7c69b039f78e80ec9d.cu
/************************************************************************* GPU Version: Tsinghua University, Aug. 2012. Written by Yun Fei in collaboration with W. Wang and B. Wang Original: Optimization Technology Center. Argonne National Laboratory and Northwestern University. Written by Ciyou Zhu in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. Contributors: * Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to pseudocode. This software is freely available, but we expect that all publications describing work using this software, or all commercial products using it, quote at least one of the references given below: * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound Constrained Optimization, (1995), SIAM Journal on Scientific and Statistical Computing , 16, 5, pp. 1190-1208. * C. Zhu, R.H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization (1997), ACM Transactions on Mathematical Software, Vol 23, Num. 4, pp. 550 - 560. *************************************************************************/ #include "lbfgsbcuda.h" namespace lbfgsbcuda { namespace subsm { template<int bx> __global__ void kernel00( const int nsub, const int* ind, const int head, const int m, const int col, const int iPitch_ws, const int oPitch, real* buf_array_p, const real* wy, const real* ws, const real* d, const real theta ) { const int j = blockIdx.x * blockDim.x + threadIdx.x; const int i = blockIdx.y; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; real mySum; if(j < nsub) { int pointr = Modular((head + i % col), m); const int k = ind[j]; if(i >= col) { mySum = ws[k * iPitch_ws + pointr] * theta; } else { mySum = wy[k * iPitch_ws + pointr]; } mySum *= d[j]; } else { mySum = 0; } sdata[tid] = mySum; __syncthreads(); if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile real* smem = sdata + tid; if(bx > 32) {*smem = mySum = mySum + smem[32];} if(bx > 16) {*smem = mySum = mySum + smem[16];} if(bx > 8) {*smem = mySum = mySum + smem[8];} if(bx > 4) {*smem = mySum = mySum + smem[4];} if(bx > 2) {*smem = mySum = mySum + smem[2];} if(bx > 1) {*smem = mySum = mySum + smem[1];} } if (tid == 0) buf_array_p[i * oPitch + blockIdx.x] = mySum; } template<int bx> __global__ void kernel01( const int n, const int iPitch, const int oPitch, const real* buf_in, real* buf_out) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; real mySum; if(i < n) mySum = buf_in[j * iPitch + i]; else mySum = 0; sdata[tid] = mySum; __syncthreads(); if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile real* smem = sdata + tid; if(bx > 32) {*smem = mySum = mySum + smem[32];} if(bx > 16) {*smem = mySum = mySum + smem[16];} if(bx > 8) {*smem = mySum = mySum + smem[8];} if(bx > 4) {*smem = mySum = mySum + smem[4];} if(bx > 2) {*smem = mySum = mySum + smem[2];} if(bx > 1) {*smem = mySum = mySum + smem[1];} } if(tid == 0) { buf_out[j * oPitch + blockIdx.x] = mySum; } } void prog0( const int n, const int* ind, const int head, const int m, const int col, const int iPitch_ws, real* buf_array_p, const real* wy, const real* ws, const real* d, real* wv, const real theta, const int iPitch_normal, const cudaStream_t& stream ) { int nblock0 = n; int mi = log2Up(nblock0); int nblock1 = iDivUp2(nblock0, mi); real* output = (nblock1 == 1) ? wv : buf_array_p; int op20 = (nblock1 == 1) ? 1 : iPitch_normal; dynamicCall(kernel00, mi, nblock1, col * 2, stream, (n, ind, head, m, col, iPitch_ws, op20, output, wy, ws, d, theta)); nblock0 = nblock1; while(nblock0 > 1) { nblock1 = iDivUp2(nblock0, mi); real* input = output; output = (nblock1 == 1) ? wv : (output + nblock0); int op20 = (nblock1 == 1) ? 
1 : iPitch_normal; dynamicCall(kernel01, mi, nblock1, col * 2, stream, (nblock0, iPitch_normal, op20, input, output)); nblock0 = nblock1; } } __global__ void kernel1( real* wv) { const int i = threadIdx.x; wv[i] = -wv[i]; } void prog1( real* wn, int col, int iPitch_wn, real* wv, const cudaStream_t& stream ) { int col2 = col * 2; lbfgsbcuda::CheckBuffer(wv, col * 2, col * 2); cublasSetStream(cublasHd, stream); cublasRtrsv( cublasHd, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, col2, wn, iPitch_wn, wv, 1); lbfgsbcuda::CheckBuffer(wn, iPitch_wn, iPitch_wn * 7); lbfgsbcuda::CheckBuffer(wv, col * 2, col * 2); kernel1<<<1, col, 0, stream>>> (wv); lbfgsbcuda::CheckBuffer(wv, col * 2, col * 2); cublasRtrsv(cublasHd, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT, col2, wn, iPitch_wn, wv, 1); lbfgsbcuda::CheckBuffer(wv, col * 2, col * 2); cublasSetStream(cublasHd, NULL); } template<int bsize> __global__ void kernel2( int nsub, const int* ind, const int col, const int head, const int m, const int iPitch, const real* wv, const real* wy, const real* ws, const real inv_theta, real* d ) { const int i = blockIdx.x * blockDim.y + threadIdx.y; const int tidx = threadIdx.x; //8 const int tidy = threadIdx.y; //64 volatile __shared__ real sdata[(512 / bsize)][bsize + 1]; __shared__ real a[2][bsize+1]; real mySum; if(tidy == 0 && tidx < col) { a[0][tidx] = wv[tidx] * inv_theta; a[1][tidx] = wv[col + tidx]; } if(i < nsub && tidx < col) { const int pointr = Modular((head + tidx), m); const int k = ind[i]; __syncthreads(); mySum = wy[k * iPitch + pointr] * a[0][tidx] + ws[k * iPitch + pointr] * a[1][tidx]; } else mySum = 0; if(bsize > 1) { volatile real* smem = sdata[tidy] + tidx; *smem = mySum; __syncthreads(); if(bsize > 4) {*smem = mySum = mySum + smem[4];} if(bsize > 2) {*smem = mySum = mySum + smem[2];} if(bsize > 1) {*smem = mySum = mySum + smem[1];} } if(tidx == 0 && i < nsub) { d[i] = (d[i] + mySum) * inv_theta; } } void prog2( const int nsub, const int* ind, const int col, const int head, const int m, const int iPitch, const real* wv, const real* wy, const real* ws, const real theta, real* d, const cudaStream_t& stream ) { real invtheta = 1.0 / theta; if(col > 4) { int nblocky = 512 / 8; kernel2<8><<<dim3(iDivUp(nsub, nblocky)), dim3(8, nblocky), 0, stream>>> (nsub, ind, col, head, m, iPitch, wv, wy, ws, invtheta, d); } else if(col > 2) { int nblocky = 512 / 4; kernel2<4><<<dim3(iDivUp(nsub, nblocky)), dim3(4, nblocky), 0, stream>>> (nsub, ind, col, head, m, iPitch, wv, wy, ws, invtheta, d); } else if(col > 1) { int nblocky = 512 / 2; kernel2<2><<<dim3(iDivUp(nsub, nblocky)), dim3(2, nblocky), 0, stream>>> (nsub, ind, col, head, m, iPitch, wv, wy, ws, invtheta, d); } else if(col == 1){ int nblocky = 512 / 1; kernel2<1><<<dim3(iDivUp(nsub, nblocky)), dim3(1, nblocky), 0, stream>>> (nsub, ind, col, head, m, iPitch, wv, wy, ws, invtheta, d); } } __global__ void kernel210( int nsub, const int* ind, const real* d, real* x, const real* l, const real* u, const int* nbd) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= nsub) return; const int k = ind[i]; real xk = x[k] + d[i]; const int nbdk = nbd[k]; if(nbdk == 1) { xk = maxr(l[k], xk); } else if(nbdk == 2) { xk = maxr(l[k], xk); xk = minr(u[k], xk); } else if(nbdk == 3) { xk = minr(u[k], xk); } x[k] = xk; } template<int bx> __global__ void kernel211( const int n, real* buf_n_r, const real* x, const real* xx, const real* gg ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.x; volatile 
__shared__ real sdata[bx]; real mySum; if(i < n) { mySum = (x[i] - xx[i]) * gg[i]; } else { mySum = 0; } sdata[tid] = mySum; __syncthreads(); if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile real* smem = sdata + tid; if(bx > 32) {*smem = mySum = mySum + smem[32];} if(bx > 16) {*smem = mySum = mySum + smem[16];} if(bx > 8) {*smem = mySum = mySum + smem[8];} if(bx > 4) {*smem = mySum = mySum + smem[4];} if(bx > 2) {*smem = mySum = mySum + smem[2];} if(bx > 1) {*smem = mySum = mySum + smem[1];} } if (tid == 0) buf_n_r[blockIdx.x] = mySum; } void prog21 ( int n, int nsub, const int* ind, const real* d, real* x, const real* l, const real* u, const int* nbd, const real* xx, const real* gg, real* buf_n_r, real* pddp, const cudaStream_t& stream) { kernel210<<<iDivUp(n, 512), 512, 0, stream>>> (nsub, ind, d, x, l, u, nbd); int nblock0 = n; int mi = log2Up(nblock0); int nblock1 = iDivUp2(nblock0, mi); real* output = (nblock1 == 1) ? pddp : buf_n_r; dynamicCall(kernel211, mi, nblock1, 1, stream, (n, output, x, xx, gg)); nblock0 = nblock1; while(nblock0 > 1) { nblock1 = iDivUp2(nblock0, mi); real* input = output; output = (nblock1 == 1) ? pddp : (output + nblock0); dynamicCall(kernel01, mi, nblock1, 1, stream, (nblock0, n, 1, input, output)); nblock0 = nblock1; } } __device__ inline void minex(volatile real& a, volatile real& b, volatile int& ia, volatile int& ib) { if(a > b) { ia = ib, a = b; } } template<int bx> __global__ void kernel30( const int nsub, const int* ind, real* d, const int* nbd, real* t, int* ti, real* x, const real* u, const real* l ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; volatile __shared__ int sdatai[bx]; real mySum = 1.0; if(i < nsub) { const int k = ind[i]; const int nbdi = nbd[k]; if(nbdi != 0) { real dk = d[i]; if( dk < 0 && nbdi <= 2 ) { real temp2 = l[k] - x[k]; if( temp2 >= 0 ) { mySum = 0; } else { mySum = minr(1.0, temp2 / dk); } } else if( dk > 0 && nbdi >= 2 ) { real temp2 = u[k] - x[k]; if( temp2 <= 0 ) { mySum = 0; } else { mySum = minr(1.0, temp2 / dk); } } } } sdata[tid] = mySum; sdatai[tid] = i; __syncthreads(); t[i] = mySum; ti[i] = i; if(bx > 512) {if (tid < 512) { minex(sdata[tid], sdata[tid + 512], sdatai[tid], sdatai[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { minex(sdata[tid], sdata[tid + 256], sdatai[tid], sdatai[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { minex(sdata[tid], sdata[tid + 128], sdatai[tid], sdatai[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { minex(sdata[tid], sdata[tid + 64], sdatai[tid], sdatai[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile real* smem = sdata + tid; volatile int* smemi = sdatai + tid; if(bx > 32) {minex(*smem, smem[32], *smemi, smemi[32]);} if(bx > 16) {minex(*smem, smem[16], *smemi, smemi[16]);} if(bx > 8) {minex(*smem, smem[8], *smemi, smemi[8]);} if(bx > 4) {minex(*smem, smem[4], *smemi, smemi[4]);} if(bx > 2) {minex(*smem, smem[2], *smemi, smemi[2]);} if(bx > 1) {minex(*smem, smem[1], *smemi, smemi[1]);} if (tid == 0) { t[blockIdx.x] = *smem; ti[blockIdx.x] = *smemi; if(gridDim.x == 1 && *smem < 1) { real dk = d[*smemi]; const int k = ind[*smemi]; if(dk > 0) { x[k] = u[k]; d[*smemi] = 0; } else if(dk < 0) { x[k] = l[k]; d[*smemi] = 0; } } } } } template<int bx> __global__ void kernel31( const int n, const int* ind, const real* buf_in, const int* bufi_in, real* buf_out, int* bufi_out, real* d, real* x, const real* u, const real* l ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.x; volatile __shared__ real sdata[bx]; volatile __shared__ int sdatai[bx]; real mySum; int mySumi; if(i < n) { mySum = buf_in[i]; mySumi = bufi_in[i]; } else { mySum = 1.0; mySumi = 0; } sdata[tid] = mySum; sdatai[tid] = mySumi; __syncthreads(); if(bx > 512) {if (tid < 512) { minex(sdata[tid], sdata[tid + 512], sdatai[tid], sdatai[tid + 512]); } __syncthreads();} if(bx > 256) {if (tid < 256) { minex(sdata[tid], sdata[tid + 256], sdatai[tid], sdatai[tid + 256]); } __syncthreads();} if(bx > 128) {if (tid < 128) { minex(sdata[tid], sdata[tid + 128], sdatai[tid], sdatai[tid + 128]); } __syncthreads();} if(bx > 64) {if (tid < 64) { minex(sdata[tid], sdata[tid + 64], sdatai[tid], sdatai[tid + 64]); } __syncthreads();} if (tid < __min(bx / 2, 32)) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
volatile real* smem = sdata + tid; volatile int* smemi = sdatai + tid; if(bx > 32) {minex(*smem, smem[32], *smemi, smemi[32]);} if(bx > 16) {minex(*smem, smem[16], *smemi, smemi[16]);} if(bx > 8) {minex(*smem, smem[8], *smemi, smemi[8]);} if(bx > 4) {minex(*smem, smem[4], *smemi, smemi[4]);} if(bx > 2) {minex(*smem, smem[2], *smemi, smemi[2]);} if(bx > 1) {minex(*smem, smem[1], *smemi, smemi[1]);} if (tid == 0) { buf_out[blockIdx.x] = *smem; bufi_out[blockIdx.x] = *smemi; if(gridDim.x == 1 && *smem < 1) { real dk = d[*smemi]; const int k = ind[*smemi]; if(dk > 0) { x[k] = u[k]; d[*smemi] = 0; } else if(dk < 0) { x[k] = l[k]; d[*smemi] = 0; } } } } } __global__ void kernel32( const int nsub, const int* ind, real* x, const real* d, const real* alpha ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; __shared__ real salpha[1]; if(i >= nsub) return; const int k = ind[i]; if(threadIdx.x == 0) { *salpha = alpha[0]; } real xi = x[k]; real di = d[i]; __syncthreads(); x[k] = salpha[0] * di + xi; } void prog3 ( const int nsub, const int* ind, real* d, const int* nbd, real* buf_s_r, int* bufi_s_r, real* x, const real* u, const real* l, const cudaStream_t& stream ) { //kernel30(nsub, d, nbd, buf_s_r, bufi_s_r, x, u, l, alpha); int nblock0 = nsub; int mi = log2Up(nblock0); int nblock1 = iDivUp2(nblock0, mi); real* output_r = buf_s_r; int* output_i = bufi_s_r; dynamicCall(kernel30, mi, nblock1, 1, stream, (nsub, ind, d, nbd, output_r, output_i, x, u, l)); /* kernel30<<<dim3(nblock1), dim3(512)>>> (nsub, d, nbd, output_r, output_i, x, u, l);*/ CheckBuffer_int(output_i, nsub, nsub); CheckBuffer(output_r, nsub, nsub); nblock0 = nblock1; while(nblock0 > 1) { nblock1 = iDivUp2(nblock0, mi); real* input_r = output_r; int* input_i = output_i; output_r = output_r + nblock0; output_i = output_i + nblock0; dynamicCall(kernel31, mi, nblock1, 1, stream, (nblock0, ind, input_r, input_i, output_r, output_i, d, x, u, l)); /* kernel31<<<dim3(nblock1), dim3(512)>>> (nblock0, input_r, input_i, output_r, output_i, d, x, u, l);*/ nblock0 = nblock1; } kernel32<<<dim3(iDivUp(nsub, 512)), dim3(512), 0, stream>>> (nsub, ind, x, d, output_r); } }; };
0955a61486d054db0e48c38f633b4f911bf6ff87.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Usage: nvcc -lcublas -DPRINT_RES -DPRINT_PERF gemm.cu #include "rocblas.h" #include "hip/library_types.h" #include <stdio.h> #include "iostream" #define NUM_THREADS_PER_BLOCK 1024 #define vec 4 using namespace std; __global__ void init_a_b(half *a, half *b, int M, int N, int K) { int c16 = 5; for(int i = 0; i < M; i++){ for(int j = 0; j < K; j++){ int im = i % c16; int jm = j % c16; int add = im + jm; int am = add % c16; float resf = (float) am; half sum = __float2half_rd(resf); a[i * K + j] = sum; } } for(int i = 0; i < K; i++){ for (int j = 0; j < N; j++){ int im = i % c16; int jm = j % c16; int add = im + jm; int am = add % c16; float resf = (float) am; half sum = __float2half_rd(resf); b[i * N + j] = sum; } } } __global__ void init_c(float *c_float, int M, int N) { for (int t = 0; t < M * N; t++) { c_float[t] = 0.0f; } } __global__ void init_c_cst(float *c_float, int M, int N) { int c16 = 5; for(int i = 0; i < M; i++){ for(int j = 0; j < N; j++){ int im = i % c16; int jm = j % c16; int add = im + jm; int am = add % c16; float resf = (float) am; c_float[i * N + j] = resf; } } } void print_res_host(float * arr, int m, int n) { std::cout << "["; for(int i = 0; i < m; i++){ if(i == 0) std::cout << "["; else std::cout << " ["; for (int j = 0; j < n; j++){ if(j == 0) std::cout << (arr[i * n + j]); else std::cout<<", "<< (arr[i * n + j]); } if(i == m - 1) std::cout << "]"; else std::cout << "], "<<std::endl; } std::cout << "]"; } __global__ void matAdd(float *c, float* c_cst, int m, int n){ for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < m * n; i += (((m * n) + (NUM_THREADS_PER_BLOCK * vec) - 1) / (NUM_THREADS_PER_BLOCK * vec)) * blockDim.x * vec) { // Calculate this block's starting address. float *base = c + (i * vec); float4 *cGmem = (float4*)base; float4 cData = *(cGmem); float *cst_base = c_cst + (i * vec); float4 *cst_cGmem = (float4*)cst_base; float4 cst_cData = *(cst_cGmem); cData.w = cData.w + cst_cData.w; cData.x = cData.x + cst_cData.x; cData.y = cData.y + cst_cData.y; cData.z = cData.z + cst_cData.z; *(cGmem) = cData; //printf("%f\n",(float)cData.w); } } int main(int argc, char **argv) { if(argc != 5){ printf("Specify problem sizes as ./gemm m n k num_iters\n"); return 0; } int M = std::atoi(argv[1]); int N = std::atoi(argv[2]); int K = std::atoi(argv[3]); int num_iters = std::atoi(argv[4]); hipblasHandle_t handle; hipblasCreate(&handle); half *A, *B; float *C, *C_cst; hipMalloc(&A, M * K * sizeof(half)); hipMalloc(&B, K * N * sizeof(half)); hipMalloc(&C, M * N * sizeof(float)); hipMalloc(&C_cst, M * N * sizeof(float)); float alpha = 1.0; float beta = 1.0; hipLaunchKernelGGL(( init_a_b), dim3(1), dim3(1), 0, 0, A, B, M, N, K); hipLaunchKernelGGL(( init_c), dim3(1), dim3(1), 0, 0, C, M, N); hipLaunchKernelGGL(( init_c_cst), dim3(1), dim3(1), 0, 0, C_cst, M, N); // Warmup iterations. for(int i = 0; i < 5; ++i){ if (HIPBLAS_STATUS_SUCCESS != hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, M, K, &alpha, B, HIP_R_16F, N, A, HIP_R_16F, K, &beta, C, HIP_R_32F, N, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)) { printf("hipblasGemmEx failed\n"); exit(-1); } } // Profiling iterations. 
hipblasHandle_t cublasHandle; hipblasCreate(&cublasHandle); cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH); float aggregateTime = 0.0f; for(int i = 0; i < num_iters; ++i){ float ms = 0.0f; hipLaunchKernelGGL(( init_c), dim3(1), dim3(1), 0, 0, C, M, N); hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start); if (HIPBLAS_STATUS_SUCCESS != hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N, M, K, &alpha, B, HIP_R_16F, N, A, HIP_R_16F, K, &beta, C, HIP_R_32F, N, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)) { printf("hipblasGemmEx failed\n"); exit(-1); } dim3 block(NUM_THREADS_PER_BLOCK, 1, 1); dim3 grid(((M * N) + (NUM_THREADS_PER_BLOCK * vec) - 1) / (NUM_THREADS_PER_BLOCK * vec), 1, 1); hipLaunchKernelGGL(( matAdd), dim3(grid), dim3(block), 0, 0, C, C_cst, M, N); hipEventRecord(end); hipEventSynchronize(end); hipEventElapsedTime(&ms, start, end); hipEventDestroy(start); hipEventDestroy(end); aggregateTime += ms; } #ifdef PRINT_PERF float avg_time = ((aggregateTime / num_iters) / 1000.0f); float ops = (float)M * (float)N * (float)K * 2.0f; float tflops = (ops * 1.0e-12f) / (avg_time); fprintf(stderr, "m:%d, n:%d, k:%d, ", M, N, K); fprintf(stderr, "%f TFLOPS\n", tflops); #endif #ifdef PRINT_RES float * C_host; C_host = (float*)malloc(M * N * sizeof(float)); hipDeviceSynchronize(); hipMemcpy(C_host, C, M * N * sizeof(float), hipMemcpyDeviceToHost); hipDeviceSynchronize(); print_res_host(C_host, M, N); free(C_host); #endif hipFree(A); hipFree(B); hipFree(C); return 0; }
0955a61486d054db0e48c38f633b4f911bf6ff87.cu
// Usage: nvcc -lcublas -DPRINT_RES -DPRINT_PERF gemm.cu #include "cublas_v2.h" #include "library_types.h" #include <stdio.h> #include "iostream" #define NUM_THREADS_PER_BLOCK 1024 #define vec 4 using namespace std; __global__ void init_a_b(half *a, half *b, int M, int N, int K) { int c16 = 5; for(int i = 0; i < M; i++){ for(int j = 0; j < K; j++){ int im = i % c16; int jm = j % c16; int add = im + jm; int am = add % c16; float resf = (float) am; half sum = __float2half_rd(resf); a[i * K + j] = sum; } } for(int i = 0; i < K; i++){ for (int j = 0; j < N; j++){ int im = i % c16; int jm = j % c16; int add = im + jm; int am = add % c16; float resf = (float) am; half sum = __float2half_rd(resf); b[i * N + j] = sum; } } } __global__ void init_c(float *c_float, int M, int N) { for (int t = 0; t < M * N; t++) { c_float[t] = 0.0f; } } __global__ void init_c_cst(float *c_float, int M, int N) { int c16 = 5; for(int i = 0; i < M; i++){ for(int j = 0; j < N; j++){ int im = i % c16; int jm = j % c16; int add = im + jm; int am = add % c16; float resf = (float) am; c_float[i * N + j] = resf; } } } void print_res_host(float * arr, int m, int n) { std::cout << "["; for(int i = 0; i < m; i++){ if(i == 0) std::cout << "["; else std::cout << " ["; for (int j = 0; j < n; j++){ if(j == 0) std::cout << (arr[i * n + j]); else std::cout<<", "<< (arr[i * n + j]); } if(i == m - 1) std::cout << "]"; else std::cout << "], "<<std::endl; } std::cout << "]"; } __global__ void matAdd(float *c, float* c_cst, int m, int n){ for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < m * n; i += (((m * n) + (NUM_THREADS_PER_BLOCK * vec) - 1) / (NUM_THREADS_PER_BLOCK * vec)) * blockDim.x * vec) { // Calculate this block's starting address. float *base = c + (i * vec); float4 *cGmem = (float4*)base; float4 cData = *(cGmem); float *cst_base = c_cst + (i * vec); float4 *cst_cGmem = (float4*)cst_base; float4 cst_cData = *(cst_cGmem); cData.w = cData.w + cst_cData.w; cData.x = cData.x + cst_cData.x; cData.y = cData.y + cst_cData.y; cData.z = cData.z + cst_cData.z; *(cGmem) = cData; //printf("%f\n",(float)cData.w); } } int main(int argc, char **argv) { if(argc != 5){ printf("Specify problem sizes as ./gemm m n k num_iters\n"); return 0; } int M = std::atoi(argv[1]); int N = std::atoi(argv[2]); int K = std::atoi(argv[3]); int num_iters = std::atoi(argv[4]); cublasHandle_t handle; cublasCreate(&handle); half *A, *B; float *C, *C_cst; cudaMalloc(&A, M * K * sizeof(half)); cudaMalloc(&B, K * N * sizeof(half)); cudaMalloc(&C, M * N * sizeof(float)); cudaMalloc(&C_cst, M * N * sizeof(float)); float alpha = 1.0; float beta = 1.0; init_a_b<<<1, 1>>>(A, B, M, N, K); init_c<<<1, 1>>>(C, M, N); init_c_cst<<<1, 1>>>(C_cst, M, N); // Warmup iterations. for(int i = 0; i < 5; ++i){ if (CUBLAS_STATUS_SUCCESS != cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, M, K, &alpha, B, CUDA_R_16F, N, A, CUDA_R_16F, K, &beta, C, CUDA_R_32F, N, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)) { printf("cublasGemmEx failed\n"); exit(-1); } } // Profiling iterations. 
cublasHandle_t cublasHandle; cublasCreate(&cublasHandle); cublasSetMathMode(cublasHandle, CUBLAS_TENSOR_OP_MATH); float aggregateTime = 0.0f; for(int i = 0; i < num_iters; ++i){ float ms = 0.0f; init_c<<<1, 1>>>(C, M, N); cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start); if (CUBLAS_STATUS_SUCCESS != cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, M, K, &alpha, B, CUDA_R_16F, N, A, CUDA_R_16F, K, &beta, C, CUDA_R_32F, N, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)) { printf("cublasGemmEx failed\n"); exit(-1); } dim3 block(NUM_THREADS_PER_BLOCK, 1, 1); dim3 grid(((M * N) + (NUM_THREADS_PER_BLOCK * vec) - 1) / (NUM_THREADS_PER_BLOCK * vec), 1, 1); matAdd<<<grid, block>>>(C, C_cst, M, N); cudaEventRecord(end); cudaEventSynchronize(end); cudaEventElapsedTime(&ms, start, end); cudaEventDestroy(start); cudaEventDestroy(end); aggregateTime += ms; } #ifdef PRINT_PERF float avg_time = ((aggregateTime / num_iters) / 1000.0f); float ops = (float)M * (float)N * (float)K * 2.0f; float tflops = (ops * 1.0e-12f) / (avg_time); fprintf(stderr, "m:%d, n:%d, k:%d, ", M, N, K); fprintf(stderr, "%f TFLOPS\n", tflops); #endif #ifdef PRINT_RES float * C_host; C_host = (float*)malloc(M * N * sizeof(float)); cudaDeviceSynchronize(); cudaMemcpy(C_host, C, M * N * sizeof(float), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); print_res_host(C_host, M, N); free(C_host); #endif cudaFree(A); cudaFree(B); cudaFree(C); return 0; }
19167382c475a2621cc8c5833ebba4808fadd5dc.hip
// !!! This is a file automatically generated by hipify!!!
/*
To compile:
  nvcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
  module load gcc
  make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
  ./mandelbrot 4096 4096 1
*/

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "png_util.h"

// Q2a: add include for CUDA header file here:
#include "hip/hip_runtime.h"

#define MXITER 1000

typedef struct {
  double r;
  double i;
}complex_t;

// return iterations before z leaves mandelbrot set for given c
__host__ __device__ int testpoint(complex_t c){
  int iter;
  complex_t z;
  double temp;

  z = c;

  for(iter=0; iter<MXITER; iter++){
    temp = (z.r*z.r) - (z.i*z.i) + c.r;
    z.i = z.r*z.i*2. + c.i;
    z.r = temp;
    if((z.r*z.r+z.i*z.i)>4.0){
      return iter;
    }
  }
  return iter;
}

// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
// CPU reference version, kept for comparison with the kernel below
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
  int n,m;
  complex_t c;

  double dr = (cmax.r-cmin.r)/(Nre-1);
  double di = (cmax.i-cmin.i)/(Nim-1);

  for(n=0;n<Nim;++n){
    for(m=0;m<Nre;++m){
      c.r = cmin.r + dr*m;
      c.i = cmin.i + di*n;
      count[m+n*Nre] = testpoint(c);
    }
  }
}

// Q2c: the same iteration as a CUDA kernel, one thread per pixel
// (ported directly from the loop body above)
__global__ void kernelmandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
  int m = blockIdx.x*blockDim.x + threadIdx.x;
  int n = blockIdx.y*blockDim.y + threadIdx.y;

  double dr = (cmax.r-cmin.r)/(Nre-1);
  double di = (cmax.i-cmin.i)/(Nim-1);

  if(m<Nre && n<Nim){
    complex_t c;
    c.r = cmin.r + dr*m;
    c.i = cmin.i + di*n;
    count[m+n*Nre] = testpoint(c);
  }
}

int main(int argc, char **argv){

  // to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ]
  // usage: ./mandelbrot 4096 4096 1

  int Nre = atoi(argv[1]);
  int Nim = atoi(argv[2]);
  int Nthreads = atoi(argv[3]);

  // Q2b: set the number of threads per block and the number of blocks here:
  int Bx = Nthreads;
  int By = Nthreads;
  int Gx = (Nre+Bx-1)/Bx;
  int Gy = (Nim+By-1)/By;
  dim3 B(Bx,By,1); //Bx*By threads in thread block
  dim3 G(Gx,Gy,1); //Gx*Gy grid of thread blocks

  // storage for the iteration counts on the host and on the device
  float *count = (float*) malloc(Nre*Nim*sizeof(float));
  float *new_count;
  hipMalloc(&new_count, Nre*Nim*sizeof(float));

  // Parameters for a bounding box for "c" that generates an interesting image
  const float centRe = -.759856, centIm= .125547;
  const float diam = 0.151579;

  complex_t cmin;
  complex_t cmax;

  cmin.r = centRe - 0.5*diam;
  cmax.r = centRe + 0.5*diam;
  cmin.i = centIm - 0.5*diam;
  cmax.i = centIm + 0.5*diam;

  clock_t start = clock(); //start time in CPU cycles

  // compute mandelbrot set on the device
  hipLaunchKernelGGL((kernelmandelbrot), dim3(G), dim3(B), 0, 0, Nre, Nim, cmin, cmax, new_count);
  hipDeviceSynchronize();

  // CPU reference: mandelbrot(Nre, Nim, cmin, cmax, count);

  clock_t end = clock(); //end time in CPU cycles

  // print elapsed time
  printf("elapsed = %f\n", ((double)(end-start))/CLOCKS_PER_SEC);

  // copy the iteration counts back to the host
  hipMemcpy(count, new_count, Nre*Nim*sizeof(float), hipMemcpyDeviceToHost);

  // output mandelbrot to png format image
  FILE *fp = fopen("mandelbrot.png", "w");

  printf("Printing mandelbrot.png...");
  write_hot_png(fp, Nre, Nim, count, 0, 80);
  printf("done.\n");

  hipFree(new_count);
  free(count);

  exit(0);
  return 0;
}
19167382c475a2621cc8c5833ebba4808fadd5dc.cu
/*
To compile:
  nvcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp
Or just type:
  module load gcc
  make
To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):
  ./mandelbrot 4096 4096 1
*/

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "png_util.h"

// Q2a: add include for CUDA header file here:
#include "cuda.h"

#define MXITER 1000

typedef struct {
  double r;
  double i;
}complex_t;

// return iterations before z leaves mandelbrot set for given c
__host__ __device__ int testpoint(complex_t c){
  int iter;
  complex_t z;
  double temp;

  z = c;

  for(iter=0; iter<MXITER; iter++){
    temp = (z.r*z.r) - (z.i*z.i) + c.r;
    z.i = z.r*z.i*2. + c.i;
    z.r = temp;
    if((z.r*z.r+z.i*z.i)>4.0){
      return iter;
    }
  }
  return iter;
}

// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
// CPU reference version, kept for comparison with the kernel below
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
  int n,m;
  complex_t c;

  double dr = (cmax.r-cmin.r)/(Nre-1);
  double di = (cmax.i-cmin.i)/(Nim-1);

  for(n=0;n<Nim;++n){
    for(m=0;m<Nre;++m){
      c.r = cmin.r + dr*m;
      c.i = cmin.i + di*n;
      count[m+n*Nre] = testpoint(c);
    }
  }
}

// Q2c: the same iteration as a CUDA kernel, one thread per pixel
// (ported directly from the loop body above)
__global__ void kernelmandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){
  int m = blockIdx.x*blockDim.x + threadIdx.x;
  int n = blockIdx.y*blockDim.y + threadIdx.y;

  double dr = (cmax.r-cmin.r)/(Nre-1);
  double di = (cmax.i-cmin.i)/(Nim-1);

  if(m<Nre && n<Nim){
    complex_t c;
    c.r = cmin.r + dr*m;
    c.i = cmin.i + di*n;
    count[m+n*Nre] = testpoint(c);
  }
}

int main(int argc, char **argv){

  // to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ]
  // usage: ./mandelbrot 4096 4096 1

  int Nre = atoi(argv[1]);
  int Nim = atoi(argv[2]);
  int Nthreads = atoi(argv[3]);

  // Q2b: set the number of threads per block and the number of blocks here:
  int Bx = Nthreads;
  int By = Nthreads;
  int Gx = (Nre+Bx-1)/Bx;
  int Gy = (Nim+By-1)/By;
  dim3 B(Bx,By,1); //Bx*By threads in thread block
  dim3 G(Gx,Gy,1); //Gx*Gy grid of thread blocks

  // storage for the iteration counts on the host and on the device
  float *count = (float*) malloc(Nre*Nim*sizeof(float));
  float *new_count;
  cudaMalloc(&new_count, Nre*Nim*sizeof(float));

  // Parameters for a bounding box for "c" that generates an interesting image
  const float centRe = -.759856, centIm= .125547;
  const float diam = 0.151579;

  complex_t cmin;
  complex_t cmax;

  cmin.r = centRe - 0.5*diam;
  cmax.r = centRe + 0.5*diam;
  cmin.i = centIm - 0.5*diam;
  cmax.i = centIm + 0.5*diam;

  clock_t start = clock(); //start time in CPU cycles

  // compute mandelbrot set on the device
  kernelmandelbrot<<<G,B>>>(Nre, Nim, cmin, cmax, new_count);
  cudaDeviceSynchronize();

  // CPU reference: mandelbrot(Nre, Nim, cmin, cmax, count);

  clock_t end = clock(); //end time in CPU cycles

  // print elapsed time
  printf("elapsed = %f\n", ((double)(end-start))/CLOCKS_PER_SEC);

  // copy the iteration counts back to the host
  cudaMemcpy(count, new_count, Nre*Nim*sizeof(float), cudaMemcpyDeviceToHost);

  // output mandelbrot to png format image
  FILE *fp = fopen("mandelbrot.png", "w");

  printf("Printing mandelbrot.png...");
  write_hot_png(fp, Nre, Nim, count, 0, 80);
  printf("done.\n");

  cudaFree(new_count);
  free(count);

  exit(0);
  return 0;
}
9e3b1cdec7f6d1f5be97d0f04c5eeadb40a794e5.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDATest.hpp"

#include <ATen/hip/Exceptions.h>

namespace c10d {
namespace test {

namespace {

__global__ void waitClocks(const uint64_t count) {
  clock_t start = clock64();
  clock_t offset = 0;
  while (offset < count) {
    offset = clock() - start;
  }
}

} // namespace

void cudaSleep(at::hip::HIPStreamMasqueradingAsCUDA& stream, uint64_t clocks) {
  hipLaunchKernelGGL(( waitClocks), dim3(1), dim3(1), 0, stream.stream(), clocks);
  C10_HIP_KERNEL_LAUNCH_CHECK();
}

int cudaNumDevices() {
  int n = 0;
  C10_HIP_CHECK_WARN(hipGetDeviceCount(&n));
  return n;
}

} // namespace test
} // namespace c10d
9e3b1cdec7f6d1f5be97d0f04c5eeadb40a794e5.cu
#include "CUDATest.hpp" #include <ATen/cuda/Exceptions.h> namespace c10d { namespace test { namespace { __global__ void waitClocks(const uint64_t count) { clock_t start = clock64(); clock_t offset = 0; while (offset < count) { offset = clock() - start; } } } // namespace void cudaSleep(at::cuda::CUDAStream& stream, uint64_t clocks) { waitClocks<<<1, 1, 0, stream.stream()>>>(clocks); C10_CUDA_KERNEL_LAUNCH_CHECK(); } int cudaNumDevices() { int n = 0; C10_CUDA_CHECK_WARN(cudaGetDeviceCount(&n)); return n; } } // namespace test } // namespace c10d
9d049d3415bda05ceceafc33011c86d0e1850ec8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
template <class T, unsigned int blockSize>
__global__ void FUNC(reduce5)(T *g_idata, T *g_odata, unsigned int n)
{
    SharedMemory<T> smem;
    T *sdata = smem.getPointer();

    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
    unsigned int j = blockIdx.x*(blockSize*2) + threadIdx.x, k = threadIdx.x;
    int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE;

    sdata[tid] = (i < n) ? g_idata[i] : 0;
    if (i + blockSize < n)
        sdata[tid] += g_idata[i+blockSize];
}
9d049d3415bda05ceceafc33011c86d0e1850ec8.cu
template <class T, unsigned int blockSize>
__global__ void FUNC(reduce5)(T *g_idata, T *g_odata, unsigned int n)
{
    SharedMemory<T> smem;
    T *sdata = smem.getPointer();

    // perform first level of reduction,
    // reading from global memory, writing to shared memory
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x;
    unsigned int j = blockIdx.x*(blockSize*2) + threadIdx.x, k = threadIdx.x;
    int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE;

    sdata[tid] = (i < n) ? g_idata[i] : 0;
    if (i + blockSize < n)
        sdata[tid] += g_idata[i+blockSize];
}
1ad90a97ea9602565a6954b0aa2a09abc1133fc9.hip
// !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include <THH/THHApply.cuh>

template <typename T>
struct absupdateOutput_functor
{
  __device__ void operator()(T* output, const T* input) const
  {
    *output = THCNumerics<T>::abs(*input);
  }
};

template <typename T>
struct absupdateGradInput_functor
{
  __device__ void operator()(T* gradInput, const T* input, const T* gradOutput) const
  {
    *gradInput = *input < 0 ? - *gradOutput : *gradOutput;
  }
};

#include "generic/Abs.cu"
#include "THHGenerateFloatTypes.h"
1ad90a97ea9602565a6954b0aa2a09abc1133fc9.cu
#include "THCUNN.h" #include "THCHalf.h" #include "THCHalfAutoNumerics.cuh" #include <THC/THCApply.cuh> template <typename T> struct absupdateOutput_functor { __device__ void operator()(T* output, const T* input) const { *output = THCNumerics<T>::abs(*input); } }; template <typename T> struct absupdateGradInput_functor { __device__ void operator()(T* gradInput, const T* input, const T* gradOutput) const { *gradInput = *input < 0 ? - *gradOutput : *gradOutput; } }; #include "generic/Abs.cu" #include "THCGenerateFloatTypes.h"
02a7ba49e693b2a5d8f2f42147873b520467e93c.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cmath>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>

// kernel that adds two vectors
__global__ void tambahVector( const float *cVectorA, const float *cVectorB, float *cVectorC, const int cJumlahElemen)
{
  // find my index
  int idx_ = 0;
}

// main function that calls the kernel
int main(void)
{
  // use the first GPU
  hipSetDevice(0);

  const int kJumlahElemen = 25600;
  size_t ukuran_vector_bytes_ = kJumlahElemen * sizeof(float);
  std::cout << "[Vector addition with element count " << kJumlahElemen << std::endl;

  float *h_A_ = (float *)malloc(ukuran_vector_bytes_);
  float *h_B_ = (float *)malloc(ukuran_vector_bytes_);
  float *h_C_ = (float *)malloc(ukuran_vector_bytes_);

  if (h_A_ == NULL || h_B_ == NULL || h_C_ == NULL)
  {
    std::cerr << "Failed to allocate host vectors!\n";
    exit(-1);
  }

  for (int i = 0; i < kJumlahElemen; ++i)
  {
    h_A_[i] = rand()/(float)RAND_MAX;
    h_B_[i] = rand()/(float)RAND_MAX;
  }

  float *d_A_ = NULL;
  float *d_B_ = NULL;
  float *d_C_ = NULL;
  hipMalloc((void **)&d_A_, ukuran_vector_bytes_);
  hipMalloc((void **)&d_B_, ukuran_vector_bytes_);
  hipMalloc((void **)&d_C_, ukuran_vector_bytes_);

  std::cout << "Copy input from host to CUDA device\n";
  hipMemcpy(d_A_, h_A_, ukuran_vector_bytes_, hipMemcpyHostToDevice);
  hipMemcpy(d_B_, h_B_, ukuran_vector_bytes_, hipMemcpyHostToDevice);

  int threads_per_block_ = 256;
  int blocks_per_grid_ = 1;
  dim3 block(threads_per_block_, 1, 1);
  dim3 grid(blocks_per_grid_, 1, 1);
  std::cout << "Launching Cuda kernel with size " << blocks_per_grid_ << " block " << threads_per_block_ << " threads\n";
  //tambahVector<<<???, ???>>>(??,??,??,??);

  hipError_t err_ = hipGetLastError();
  if (err_ != hipSuccess)
  {
    std::cerr << "Failed to launch Cuda kernel (error code " << hipGetErrorString(err_) << ")!\n";
    exit(-1);
  }

  std::cout << "Copy data from CUDA device to host memory\n";
  hipMemcpy(h_C_, d_C_, ukuran_vector_bytes_, hipMemcpyDeviceToHost);

  // verify the values
  for (int i = 0; i < kJumlahElemen; ++i)
  {
    if (fabs(h_A_[i] + h_B_[i] - h_C_[i]) > 1e-5)
    {
      std::cerr << "Verification failed " << i << "!\n";
      exit(-1);
    }
  }
  std::cout << "Test PASSED\n";

  hipFree(d_A_);
  hipFree(d_B_);
  hipFree(d_C_);
  free(h_A_);
  free(h_B_);
  free(h_C_);

  hipDeviceReset();
  std::cout << "Done\n";
  return 0;
}
02a7ba49e693b2a5d8f2f42147873b520467e93c.cu
#include <iostream>
#include <cmath>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

// kernel that adds two vectors
__global__ void tambahVector( const float *cVectorA, const float *cVectorB, float *cVectorC, const int cJumlahElemen)
{
  // find my index
  int idx_ = 0;
}

// main function that calls the kernel
int main(void)
{
  // use the first GPU
  cudaSetDevice(0);

  const int kJumlahElemen = 25600;
  size_t ukuran_vector_bytes_ = kJumlahElemen * sizeof(float);
  std::cout << "[Vector addition with element count " << kJumlahElemen << std::endl;

  float *h_A_ = (float *)malloc(ukuran_vector_bytes_);
  float *h_B_ = (float *)malloc(ukuran_vector_bytes_);
  float *h_C_ = (float *)malloc(ukuran_vector_bytes_);

  if (h_A_ == NULL || h_B_ == NULL || h_C_ == NULL)
  {
    std::cerr << "Failed to allocate host vectors!\n";
    exit(-1);
  }

  for (int i = 0; i < kJumlahElemen; ++i)
  {
    h_A_[i] = rand()/(float)RAND_MAX;
    h_B_[i] = rand()/(float)RAND_MAX;
  }

  float *d_A_ = NULL;
  float *d_B_ = NULL;
  float *d_C_ = NULL;
  cudaMalloc((void **)&d_A_, ukuran_vector_bytes_);
  cudaMalloc((void **)&d_B_, ukuran_vector_bytes_);
  cudaMalloc((void **)&d_C_, ukuran_vector_bytes_);

  std::cout << "Copy input from host to CUDA device\n";
  cudaMemcpy(d_A_, h_A_, ukuran_vector_bytes_, cudaMemcpyHostToDevice);
  cudaMemcpy(d_B_, h_B_, ukuran_vector_bytes_, cudaMemcpyHostToDevice);

  int threads_per_block_ = 256;
  int blocks_per_grid_ = 1;
  dim3 block(threads_per_block_, 1, 1);
  dim3 grid(blocks_per_grid_, 1, 1);
  std::cout << "Launching Cuda kernel with size " << blocks_per_grid_ << " block " << threads_per_block_ << " threads\n";
  //tambahVector<<<???, ???>>>(??,??,??,??);

  cudaError_t err_ = cudaGetLastError();
  if (err_ != cudaSuccess)
  {
    std::cerr << "Failed to launch Cuda kernel (error code " << cudaGetErrorString(err_) << ")!\n";
    exit(-1);
  }

  std::cout << "Copy data from CUDA device to host memory\n";
  cudaMemcpy(h_C_, d_C_, ukuran_vector_bytes_, cudaMemcpyDeviceToHost);

  // verify the values
  for (int i = 0; i < kJumlahElemen; ++i)
  {
    if (fabs(h_A_[i] + h_B_[i] - h_C_[i]) > 1e-5)
    {
      std::cerr << "Verification failed " << i << "!\n";
      exit(-1);
    }
  }
  std::cout << "Test PASSED\n";

  cudaFree(d_A_);
  cudaFree(d_B_);
  cudaFree(d_C_);
  free(h_A_);
  free(h_B_);
  free(h_C_);

  cudaDeviceReset();
  std::cout << "Done\n";
  return 0;
}
2774ff21c10d800a8b318f6e208c27efeea0557f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zmergetfqmr.cu, normal z -> d, Wed Jan 2 14:18:53 2019 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_d // These routines merge multiple kernels from tfqmr into one. /* -------------------------------------------------------------------------- */ __global__ void magma_dtfqmr_1_kernel( int num_rows, int num_cols, double alpha, double sigma, double *v, double *Au, double *u_m, double *pu_m, double *u_mp1, double *w, double *d, double *Ad ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ u_mp1[ i+j*num_rows ] = u_m[ i+j*num_rows ] - alpha * v[ i+j*num_rows ]; w[ i+j*num_rows ] = w[ i+j*num_rows ] - alpha * Au[ i+j*num_rows ]; d[ i+j*num_rows ] = pu_m[ i+j*num_rows ] + sigma * d[ i+j*num_rows ]; Ad[ i+j*num_rows ] = Au[ i+j*num_rows ] + sigma * Ad[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: u_mp1 = u_mp1 - alpha*v; w = w - alpha*Au; d = pu_m + sigma*d; Ad = Au + sigma*Ad; @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha double scalar @param[in] sigma double scalar @param[in] v magmaDouble_ptr vector @param[in] Au magmaDouble_ptr vector @param[in,out] u_m magmaDouble_ptr vector @param[in,out] pu_m magmaDouble_ptr vector @param[in,out] u_mp1 magmaDouble_ptr vector @param[in,out] w magmaDouble_ptr vector @param[in,out] d magmaDouble_ptr vector @param[in,out] Ad magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dtfqmr_1( magma_int_t num_rows, magma_int_t num_cols, double alpha, double sigma, magmaDouble_ptr v, magmaDouble_ptr Au, magmaDouble_ptr u_m, magmaDouble_ptr pu_m, magmaDouble_ptr u_mp1, magmaDouble_ptr w, magmaDouble_ptr d, magmaDouble_ptr Ad, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_dtfqmr_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, sigma, v, Au, u_m, pu_m, u_mp1, w, d, Ad ); return MAGMA_SUCCESS; } __global__ void magma_dtfqmr_2_kernel( int num_rows, int num_cols, double eta, magmaDouble_ptr d, magmaDouble_ptr Ad, magmaDouble_ptr x, magmaDouble_ptr r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ x[ i+j*num_rows ] = x[ i+j*num_rows ] + eta * d[ i+j*num_rows ]; r[ i+j*num_rows ] = r[ i+j*num_rows ] - eta * Ad[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: x = x + eta * d r = r - eta * Ad Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] eta double scalar @param[in] d magmaDouble_ptr vector @param[in] Ad magmaDouble_ptr vector @param[in,out] x magmaDouble_ptr vector @param[in,out] r magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dtfqmr_2( magma_int_t num_rows, magma_int_t num_cols, double eta, magmaDouble_ptr d, magmaDouble_ptr Ad, magmaDouble_ptr x, magmaDouble_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_dtfqmr_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, eta, d, Ad, x, r ); return MAGMA_SUCCESS; } __global__ void magma_dtfqmr_3_kernel( int num_rows, int num_cols, double beta, double *w, double *u_m, double *u_mp1 ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ u_mp1[ i+j*num_rows ] = w[ i+j*num_rows ] + beta * u_m[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: u_mp1 = w + beta*u_mp1 Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta double scalar @param[in] w magmaDouble_ptr vector @param[in] u_m magmaDouble_ptr vector @param[in,out] u_mp1 magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dtfqmr_3( magma_int_t num_rows, magma_int_t num_cols, double beta, magmaDouble_ptr w, magmaDouble_ptr u_m, magmaDouble_ptr u_mp1, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_dtfqmr_3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, w, u_m, u_mp1 ); return MAGMA_SUCCESS; } __global__ void magma_dtfqmr_4_kernel( int num_rows, int num_cols, double beta, double *Au_new, double *v, double *Au ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ double tmp = Au_new[ i+j*num_rows ]; v[ i+j*num_rows ] = tmp + beta * Au[ i+j*num_rows ] + beta * beta * v[ i+j*num_rows ]; Au[ i+j*num_rows ] = tmp; } } } /** Purpose ------- Merges multiple operations into one kernel: v = Au_new + beta*(Au+beta*v); Au = Au_new Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta double scalar @param[in] Au_new magmaDouble_ptr vector @param[in,out] v magmaDouble_ptr vector @param[in,out] Au magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dtfqmr_4( magma_int_t num_rows, magma_int_t num_cols, double beta, magmaDouble_ptr Au_new, magmaDouble_ptr v, magmaDouble_ptr Au, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_dtfqmr_4_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, Au_new, v, Au ); return MAGMA_SUCCESS; } __global__ void magma_dtfqmr_5_kernel( int num_rows, int num_cols, double alpha, double sigma, double *v, double *Au, double *u_mp1, double *w, double *d, double *Ad ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ w[ i+j*num_rows ] = w[ i+j*num_rows ] - alpha * Au[ i+j*num_rows ]; d[ i+j*num_rows ] = u_mp1[ i+j*num_rows ] + sigma * d[ i+j*num_rows ]; Ad[ i+j*num_rows ] = Au[ i+j*num_rows ] + sigma * Ad[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: w = w - alpha*Au; d = pu_m + sigma*d; Ad = Au + sigma*Ad; @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha double scalar @param[in] sigma double scalar @param[in] v magmaDouble_ptr vector @param[in] Au magmaDouble_ptr vector @param[in,out] u_mp1 magmaDouble_ptr vector @param[in,out] w magmaDouble_ptr vector @param[in,out] d magmaDouble_ptr vector @param[in,out] Ad magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dtfqmr_5( magma_int_t num_rows, magma_int_t num_cols, double alpha, double sigma, magmaDouble_ptr v, magmaDouble_ptr Au, magmaDouble_ptr u_mp1, magmaDouble_ptr w, magmaDouble_ptr d, magmaDouble_ptr Ad, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_dtfqmr_5_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, sigma, v, Au, u_mp1, w, d, Ad ); return MAGMA_SUCCESS; }
2774ff21c10d800a8b318f6e208c27efeea0557f.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @generated from sparse/blas/zmergetfqmr.cu, normal z -> d, Wed Jan 2 14:18:53 2019 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_d // These routines merge multiple kernels from tfqmr into one. /* -------------------------------------------------------------------------- */ __global__ void magma_dtfqmr_1_kernel( int num_rows, int num_cols, double alpha, double sigma, double *v, double *Au, double *u_m, double *pu_m, double *u_mp1, double *w, double *d, double *Ad ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ u_mp1[ i+j*num_rows ] = u_m[ i+j*num_rows ] - alpha * v[ i+j*num_rows ]; w[ i+j*num_rows ] = w[ i+j*num_rows ] - alpha * Au[ i+j*num_rows ]; d[ i+j*num_rows ] = pu_m[ i+j*num_rows ] + sigma * d[ i+j*num_rows ]; Ad[ i+j*num_rows ] = Au[ i+j*num_rows ] + sigma * Ad[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: u_mp1 = u_mp1 - alpha*v; w = w - alpha*Au; d = pu_m + sigma*d; Ad = Au + sigma*Ad; @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha double scalar @param[in] sigma double scalar @param[in] v magmaDouble_ptr vector @param[in] Au magmaDouble_ptr vector @param[in,out] u_m magmaDouble_ptr vector @param[in,out] pu_m magmaDouble_ptr vector @param[in,out] u_mp1 magmaDouble_ptr vector @param[in,out] w magmaDouble_ptr vector @param[in,out] d magmaDouble_ptr vector @param[in,out] Ad magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dtfqmr_1( magma_int_t num_rows, magma_int_t num_cols, double alpha, double sigma, magmaDouble_ptr v, magmaDouble_ptr Au, magmaDouble_ptr u_m, magmaDouble_ptr pu_m, magmaDouble_ptr u_mp1, magmaDouble_ptr w, magmaDouble_ptr d, magmaDouble_ptr Ad, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_dtfqmr_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, sigma, v, Au, u_m, pu_m, u_mp1, w, d, Ad ); return MAGMA_SUCCESS; } __global__ void magma_dtfqmr_2_kernel( int num_rows, int num_cols, double eta, magmaDouble_ptr d, magmaDouble_ptr Ad, magmaDouble_ptr x, magmaDouble_ptr r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ x[ i+j*num_rows ] = x[ i+j*num_rows ] + eta * d[ i+j*num_rows ]; r[ i+j*num_rows ] = r[ i+j*num_rows ] - eta * Ad[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: x = x + eta * d r = r - eta * Ad Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] eta double scalar @param[in] d magmaDouble_ptr vector @param[in] Ad magmaDouble_ptr vector @param[in,out] x magmaDouble_ptr vector @param[in,out] r magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dtfqmr_2( magma_int_t num_rows, magma_int_t num_cols, double eta, magmaDouble_ptr d, magmaDouble_ptr Ad, magmaDouble_ptr x, magmaDouble_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_dtfqmr_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, eta, d, Ad, x, r ); return MAGMA_SUCCESS; } __global__ void magma_dtfqmr_3_kernel( int num_rows, int num_cols, double beta, double *w, double *u_m, double *u_mp1 ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ u_mp1[ i+j*num_rows ] = w[ i+j*num_rows ] + beta * u_m[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: u_mp1 = w + beta*u_mp1 Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta double scalar @param[in] w magmaDouble_ptr vector @param[in] u_m magmaDouble_ptr vector @param[in,out] u_mp1 magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dtfqmr_3( magma_int_t num_rows, magma_int_t num_cols, double beta, magmaDouble_ptr w, magmaDouble_ptr u_m, magmaDouble_ptr u_mp1, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_dtfqmr_3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, w, u_m, u_mp1 ); return MAGMA_SUCCESS; } __global__ void magma_dtfqmr_4_kernel( int num_rows, int num_cols, double beta, double *Au_new, double *v, double *Au ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ double tmp = Au_new[ i+j*num_rows ]; v[ i+j*num_rows ] = tmp + beta * Au[ i+j*num_rows ] + beta * beta * v[ i+j*num_rows ]; Au[ i+j*num_rows ] = tmp; } } } /** Purpose ------- Merges multiple operations into one kernel: v = Au_new + beta*(Au+beta*v); Au = Au_new Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta double scalar @param[in] Au_new magmaDouble_ptr vector @param[in,out] v magmaDouble_ptr vector @param[in,out] Au magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dtfqmr_4( magma_int_t num_rows, magma_int_t num_cols, double beta, magmaDouble_ptr Au_new, magmaDouble_ptr v, magmaDouble_ptr Au, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_dtfqmr_4_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, Au_new, v, Au ); return MAGMA_SUCCESS; } __global__ void magma_dtfqmr_5_kernel( int num_rows, int num_cols, double alpha, double sigma, double *v, double *Au, double *u_mp1, double *w, double *d, double *Ad ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ w[ i+j*num_rows ] = w[ i+j*num_rows ] - alpha * Au[ i+j*num_rows ]; d[ i+j*num_rows ] = u_mp1[ i+j*num_rows ] + sigma * d[ i+j*num_rows ]; Ad[ i+j*num_rows ] = Au[ i+j*num_rows ] + sigma * Ad[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: w = w - alpha*Au; d = pu_m + sigma*d; Ad = Au + sigma*Ad; @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha double scalar @param[in] sigma double scalar @param[in] v magmaDouble_ptr vector @param[in] Au magmaDouble_ptr vector @param[in,out] u_mp1 magmaDouble_ptr vector @param[in,out] w magmaDouble_ptr vector @param[in,out] d magmaDouble_ptr vector @param[in,out] Ad magmaDouble_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dgegpuk ********************************************************************/ extern "C" magma_int_t magma_dtfqmr_5( magma_int_t num_rows, magma_int_t num_cols, double alpha, double sigma, magmaDouble_ptr v, magmaDouble_ptr Au, magmaDouble_ptr u_mp1, magmaDouble_ptr w, magmaDouble_ptr d, magmaDouble_ptr Ad, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_dtfqmr_5_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, sigma, v, Au, u_mp1, w, d, Ad ); return MAGMA_SUCCESS; }
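/*
   An illustrative, hypothetical CPU reference (not part of MAGMA) for the
   update that magma_dtfqmr_2 above performs, i.e. x = x + eta*d and
   r = r - eta*Ad, over num_rows x num_cols vectors stored column-major exactly
   as the kernels index them (i + j*num_rows). The function name is made up and
   is only meant for checking the GPU result on small sizes.
*/
static void dtfqmr_2_reference(
    int num_rows, int num_cols, double eta,
    const double *d, const double *Ad, double *x, double *r)
{
    for (int j = 0; j < num_cols; j++) {
        for (int i = 0; i < num_rows; i++) {
            int k = i + j * num_rows;   /* column-major index, as in the kernels */
            x[k] = x[k] + eta * d[k];   /* x = x + eta * d  */
            r[k] = r[k] - eta * Ad[k];  /* r = r - eta * Ad */
        }
    }
}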
1c97cbdce6ca2c62e82b993e6dcf10504530ade4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <hip/hip_runtime.h> // void Radix(int* array, int array_size, int max_digit); /* Thread function */ __host__ void rng(int* arr, int n); /* Seed function */ __host__ int max_el(int * vec, int n); __host__ int num_digit(int el); __device__ int to_digit(int el, int divider); __host__ int to_digit_host(int el, int divider); __host__ void print_array(int * array, int n); __global__ void count_to_bucket(int * data, int * bucket, int length, int digit); __host__ void countSort(int * data, int * bucket, int length, int digit); __host__ void empty_bucket(int * bucket, int size); __host__ void print_array_file(int * array, int array_len); int main(int argc,char *argv[]) { if(argc != 2) { perror("Please specify data length"); exit(1); } printf("flag 1\n"); int data_size = strtol(argv[1], NULL, 10); int numThread = 1000; float numBlocksFloat = (float) data_size / numThread; int numBlocks = ceil(numBlocksFloat); int *global_array; int *global_bucket; int max_digit; int base= 10; printf("data size : %d\n%.f\n", data_size,numBlocksFloat); printf("flag 2 thread %d block %d \n", numThread, numBlocks); // aloocating array to be accessible by both cpu and gpu hipMallocManaged(&global_array, data_size*sizeof(int)+1); // hipMalloc(&local_array,data_size*sizeof(int)+1); rng(global_array, data_size); // hipMemcpy(d_a, a, sizeof(float) * N, hipMemcpyHostToDevice); printf("flag 3\n"); printf("flag 4\n"); // global_array = (*int )malloc(data_size * sizeof(int)); // initialization data print_array(global_array, data_size); max_digit = num_digit(max_el(global_array, data_size)); printf("max digit %d\n", max_digit); int bucket_el = base*max_digit; hipMallocManaged(&global_bucket, bucket_el*sizeof(int)+1); empty_bucket(global_bucket,bucket_el); for(int i = 1; i<= max_digit; i++){ hipLaunchKernelGGL(( count_to_bucket), dim3(numBlocks),dim3(numThread), 0, 0, global_array,global_bucket,data_size,i); } // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); for(int i = 0; i<max_digit; i++){ countSort(global_array, global_bucket, data_size, i); } print_array(global_bucket,bucket_el); print_array(global_array, data_size); hipFree(global_array); //hipFree(global_bucket); return 0; } __global__ void count_to_bucket(int * data, int * bucket, int length, int digit){ int block = blockIdx.x; int thread = threadIdx.x; int i = block*1000+thread; // printf("block %d thread %d\n", digit, thread); //for(int i = (digit-1)*10 + thread;i <=(digit-1)*10+thread && i < length; i++){ if(block*1000+thread < length){ int num_bucket = to_digit(data[i], digit) + 10*(digit-1); printf("%d [%d] %d\n", data[i], digit, num_bucket); bucket[num_bucket] ++; } //} }; __host__ // void countSort(int * data, int * bucket, int length, int digit){ // int *local_sort = (int*) malloc (length * sizeof(int)); // int index = 0; // // sort // // printf("local sort "); // for(int block =0; block < digit; block++){ // for(int d = 0; d < 10; d++){ // for(int j = 0; j < length; j++){ // if(to_digit_host(data[j], block) == d){ // local_sort[index] = data[j]; // index ++; // bucket[block*10+d] --; // } // if(bucket[block*10+d] == 0) { // // printf("\n"); // break; // } // } // } // } // // printf("index ends in %d \n", index); // // copy // for(int i=0; i < length; i++){ // data[i] = local_sort[i]; // } // free(local_sort); // //empty_bucket(bucket, 10); // } void countSort(int * data, int * bucket, int length, int digit){ int * local_sort = (int*) 
malloc (length * sizeof(int)); int index = 0; // sort // printf("local sort "); for(int i =0; i < 10; i++){ for(int j = 0; j < length; j++){ if(to_digit_host(data[j], digit+1) == i){ local_sort[index] = data[j]; index ++; bucket[digit*10+i] --; } if(bucket[digit*10+i] == 0) { // printf("\n"); break; } } } // printf("index ends in %d \n", index); // copy for(int i=0; i < length; i++){ data[i] = local_sort[i]; } free(local_sort); empty_bucket(bucket, 10); } __host__ void empty_bucket(int * bucket, int size){ for(int i = 0; i < size; i++){ bucket[i] = 0; } } __host__ void rng(int* arr, int n) { int seed = 13516123; srand(seed); for(long i = 0; i < n; i++) { arr[i] = (int)rand(); } } __host__ int max_el(int * vec, int n){ int max = vec[0]; for(int i = 0; i < n; i++){ if(vec[i] > max) max = vec[i]; } return max; }; __device__ int to_digit(int el, int divider){ for(int i = 1; i< divider; i++){ el /= 10; } return el % 10; }; __host__ int to_digit_host(int el, int divider){ for(int i = 1; i< divider; i++){ el /= 10; } return el % 10; }; __host__ void print_array(int * array, int array_len){ int n = array_len; for(int i = 0; i < n; i++){ printf("%d ", array[i]); } printf("\n"); } __host__ void print_array_file(int * array, int array_len){ int n = array_len; FILE * fp; FILE * fo; int i; /* open the file for writing*/ fp = fopen ("../test/result.txt","w"); fo = fopen ("../output/output.txt","w"); /* write 10 lines of text into the file stream*/ for(i = 0; i < n;i++){ fprintf (fp, "%d ", array[i]); fprintf (fo, "%d ", array[i]); } fprintf (fp, "\n "); fprintf (fo, "\n "); /* close the file*/ fclose (fp); fclose (fo); } __host__ int num_digit(int el){ int digit = 1; while(el > 9){ el /= 10; digit++; } return digit; };
1c97cbdce6ca2c62e82b993e6dcf10504530ade4.cu
#include <stdio.h> #include <math.h> #include <cuda.h> // void Radix(int* array, int array_size, int max_digit); /* Thread function */ __host__ void rng(int* arr, int n); /* Seed function */ __host__ int max_el(int * vec, int n); __host__ int num_digit(int el); __device__ int to_digit(int el, int divider); __host__ int to_digit_host(int el, int divider); __host__ void print_array(int * array, int n); __global__ void count_to_bucket(int * data, int * bucket, int length, int digit); __host__ void countSort(int * data, int * bucket, int length, int digit); __host__ void empty_bucket(int * bucket, int size); __host__ void print_array_file(int * array, int array_len); int main(int argc,char *argv[]) { if(argc != 2) { perror("Please specify data length"); exit(1); } printf("flag 1\n"); int data_size = strtol(argv[1], NULL, 10); int numThread = 1000; float numBlocksFloat = (float) data_size / numThread; int numBlocks = ceil(numBlocksFloat); int *global_array; int *global_bucket; int max_digit; int base= 10; printf("data size : %d\n%.f\n", data_size,numBlocksFloat); printf("flag 2 thread %d block %d \n", numThread, numBlocks); // aloocating array to be accessible by both cpu and gpu cudaMallocManaged(&global_array, data_size*sizeof(int)+1); // cudaMalloc(&local_array,data_size*sizeof(int)+1); rng(global_array, data_size); // cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice); printf("flag 3\n"); printf("flag 4\n"); // global_array = (*int )malloc(data_size * sizeof(int)); // initialization data print_array(global_array, data_size); max_digit = num_digit(max_el(global_array, data_size)); printf("max digit %d\n", max_digit); int bucket_el = base*max_digit; cudaMallocManaged(&global_bucket, bucket_el*sizeof(int)+1); empty_bucket(global_bucket,bucket_el); for(int i = 1; i<= max_digit; i++){ count_to_bucket<<<numBlocks,numThread>>>(global_array,global_bucket,data_size,i); } // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); for(int i = 0; i<max_digit; i++){ countSort(global_array, global_bucket, data_size, i); } print_array(global_bucket,bucket_el); print_array(global_array, data_size); cudaFree(global_array); //cudaFree(global_bucket); return 0; } __global__ void count_to_bucket(int * data, int * bucket, int length, int digit){ int block = blockIdx.x; int thread = threadIdx.x; int i = block*1000+thread; // printf("block %d thread %d\n", digit, thread); //for(int i = (digit-1)*10 + thread;i <=(digit-1)*10+thread && i < length; i++){ if(block*1000+thread < length){ int num_bucket = to_digit(data[i], digit) + 10*(digit-1); printf("%d [%d] %d\n", data[i], digit, num_bucket); bucket[num_bucket] ++; } //} }; __host__ // void countSort(int * data, int * bucket, int length, int digit){ // int *local_sort = (int*) malloc (length * sizeof(int)); // int index = 0; // // sort // // printf("local sort "); // for(int block =0; block < digit; block++){ // for(int d = 0; d < 10; d++){ // for(int j = 0; j < length; j++){ // if(to_digit_host(data[j], block) == d){ // local_sort[index] = data[j]; // index ++; // bucket[block*10+d] --; // } // if(bucket[block*10+d] == 0) { // // printf("\n"); // break; // } // } // } // } // // printf("index ends in %d \n", index); // // copy // for(int i=0; i < length; i++){ // data[i] = local_sort[i]; // } // free(local_sort); // //empty_bucket(bucket, 10); // } void countSort(int * data, int * bucket, int length, int digit){ int * local_sort = (int*) malloc (length * sizeof(int)); int index = 0; // sort // printf("local sort "); for(int i =0; i < 
10; i++){ for(int j = 0; j < length; j++){ if(to_digit_host(data[j], digit+1) == i){ local_sort[index] = data[j]; index ++; bucket[digit*10+i] --; } if(bucket[digit*10+i] == 0) { // printf("\n"); break; } } } // printf("index ends in %d \n", index); // copy for(int i=0; i < length; i++){ data[i] = local_sort[i]; } free(local_sort); empty_bucket(bucket, 10); } __host__ void empty_bucket(int * bucket, int size){ for(int i = 0; i < size; i++){ bucket[i] = 0; } } __host__ void rng(int* arr, int n) { int seed = 13516123; srand(seed); for(long i = 0; i < n; i++) { arr[i] = (int)rand(); } } __host__ int max_el(int * vec, int n){ int max = vec[0]; for(int i = 0; i < n; i++){ if(vec[i] > max) max = vec[i]; } return max; }; __device__ int to_digit(int el, int divider){ for(int i = 1; i< divider; i++){ el /= 10; } return el % 10; }; __host__ int to_digit_host(int el, int divider){ for(int i = 1; i< divider; i++){ el /= 10; } return el % 10; }; __host__ void print_array(int * array, int array_len){ int n = array_len; for(int i = 0; i < n; i++){ printf("%d ", array[i]); } printf("\n"); } __host__ void print_array_file(int * array, int array_len){ int n = array_len; FILE * fp; FILE * fo; int i; /* open the file for writing*/ fp = fopen ("../test/result.txt","w"); fo = fopen ("../output/output.txt","w"); /* write 10 lines of text into the file stream*/ for(i = 0; i < n;i++){ fprintf (fp, "%d ", array[i]); fprintf (fo, "%d ", array[i]); } fprintf (fp, "\n "); fprintf (fo, "\n "); /* close the file*/ fclose (fp); fclose (fo); } __host__ int num_digit(int el){ int digit = 1; while(el > 9){ el /= 10; digit++; } return digit; };
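/*
   A hypothetical host-side cross-check for the bucket counts produced by
   count_to_bucket above. It follows the same conventions as the file: digit
   positions are 1-based (1 = least significant), each digit position owns ten
   consecutive bucket slots at offset (digit-1)*10, and the digit of an element
   is extracted the same way as to_digit_host. The bucket array is assumed to
   be zeroed beforehand. Purely illustrative, for small inputs only.
*/
void count_to_bucket_reference(const int *data, int length, int max_digit, int *bucket)
{
    for (int digit = 1; digit <= max_digit; digit++) {
        for (int j = 0; j < length; j++) {
            int el = data[j];
            for (int k = 1; k < digit; k++) {   // same reduction as to_digit_host
                el /= 10;
            }
            bucket[(digit - 1) * 10 + (el % 10)]++;
        }
    }
}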
5eb5871f7a0fe2e8cb683e9a63ff6f485e90e1f0.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <ctype.h>
#include <time.h>   // for time()

#define TILE_SIZE 2
#define MAX_SIZE 2500

__global__ void MatrixMulShared(int *M, int *N, int *P, int width)
{
    __shared__ int sharedM[TILE_SIZE][TILE_SIZE];
    __shared__ int sharedN[TILE_SIZE][TILE_SIZE];

    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int row = by * TILE_SIZE + ty;
    int col = bx * TILE_SIZE + tx;
    int sum = 0;

    // walk over the tiles of M and N that contribute to element (row, col)
    for (int m = 0; m < width/TILE_SIZE; m++) {
        sharedM[ty][tx] = M[row*width + (m*TILE_SIZE + tx)];
        sharedN[ty][tx] = N[(m*TILE_SIZE + ty)*width + col];
        __syncthreads();

        // accumulate the partial dot product held in the two tiles
        for (int k = 0; k < TILE_SIZE; k++) {
            sum += sharedM[ty][k] * sharedN[k][tx];
        }
        __syncthreads();
    }
    P[row*width+col] = sum;
}

void printMat(int *a, int width)
{
    int i, j;
    for (i = 0; i < width; i++)
        for (j = 0; j < width; j++)
            printf("%d%c", a[i*width+j], (j == (width-1)) ? '\n' : '\t');
    printf("\n");
}

int* generatorMatrixa(int width)
{
    int zarodek;
    zarodek = time(NULL);
    srand(zarodek); // use the current time in seconds as the seed
    int *a = (int*)calloc(width*width, sizeof(int));
    int i, j;
    for (i = 0; i < width; i++)
        for (j = 0; j < width; j++)
            a[i*width+j] = rand()%10;
    return a;
}

int *matMul(int *hostA, int *hostB, int width)
{
    hipEvent_t start, stop;
    float time;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    int SIZE = width*width;
    int *hostC = (int*)calloc(SIZE, sizeof(int));
    int *devA, *devB, *devC;
    hipMalloc((void**) &devA, SIZE*sizeof(int));
    hipMalloc((void**) &devB, SIZE*sizeof(int));
    hipMalloc((void**) &devC, SIZE*sizeof(int));

    dim3 gridDim(width/TILE_SIZE, width/TILE_SIZE);
    dim3 blockDim(TILE_SIZE, TILE_SIZE);

    hipMemcpy(devA, hostA, SIZE*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(devB, hostB, SIZE*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(devC, hostC, SIZE*sizeof(int), hipMemcpyHostToDevice);

    hipEventRecord(start, 0); // start the timing
    hipLaunchKernelGGL(( MatrixMulShared), dim3(gridDim), dim3(blockDim), 0, 0, devA, devB, devC, width); // the actual computation
    hipEventRecord(stop, 0);  // stop the timing
    hipEventSynchronize(stop); // synchronize

    hipMemcpy(hostC, devC, SIZE*sizeof(int), hipMemcpyDeviceToHost);
    hipEventElapsedTime(&time, start, stop);
    printf("Elapsed time: %f ms\n", time);

    hipFree(devA); hipFree(devB); hipFree(devC);
    return hostC;
}

void oblicz(int Rozmiar)
{
    int rozmiar = Rozmiar;
    int *A = generatorMatrixa(rozmiar);
    int *B = generatorMatrixa(rozmiar); // initialize the input matrices, then run the multiplication
    printf("For matrices of size = %i ", rozmiar);
    int *C = matMul(A, B, rozmiar);
    //printf("\n");
    //printMat(C,m);
    free(A);
    free(B);
    free(C);
}

int main()
{
    // run the benchmark for a series of sizes
    int i;
    for (i = 0; i < MAX_SIZE; ++i)
        if (i % 100 == 0 && i != 0) { // skip the degenerate 0x0 launch
            oblicz(i);
        }
    return 0;
}
5eb5871f7a0fe2e8cb683e9a63ff6f485e90e1f0.cu
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <ctype.h>
#include <time.h>   // for time()

#define TILE_SIZE 2
#define MAX_SIZE 2500

__global__ void MatrixMulShared(int *M, int *N, int *P, int width)
{
    __shared__ int sharedM[TILE_SIZE][TILE_SIZE];
    __shared__ int sharedN[TILE_SIZE][TILE_SIZE];

    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int row = by * TILE_SIZE + ty;
    int col = bx * TILE_SIZE + tx;
    int sum = 0;

    // walk over the tiles of M and N that contribute to element (row, col)
    for (int m = 0; m < width/TILE_SIZE; m++) {
        sharedM[ty][tx] = M[row*width + (m*TILE_SIZE + tx)];
        sharedN[ty][tx] = N[(m*TILE_SIZE + ty)*width + col];
        __syncthreads();

        // accumulate the partial dot product held in the two tiles
        for (int k = 0; k < TILE_SIZE; k++) {
            sum += sharedM[ty][k] * sharedN[k][tx];
        }
        __syncthreads();
    }
    P[row*width+col] = sum;
}

void printMat(int *a, int width)
{
    int i, j;
    for (i = 0; i < width; i++)
        for (j = 0; j < width; j++)
            printf("%d%c", a[i*width+j], (j == (width-1)) ? '\n' : '\t');
    printf("\n");
}

int* generatorMatrixa(int width)
{
    int zarodek;
    zarodek = time(NULL);
    srand(zarodek); // use the current time in seconds as the seed
    int *a = (int*)calloc(width*width, sizeof(int));
    int i, j;
    for (i = 0; i < width; i++)
        for (j = 0; j < width; j++)
            a[i*width+j] = rand()%10;
    return a;
}

int *matMul(int *hostA, int *hostB, int width)
{
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    int SIZE = width*width;
    int *hostC = (int*)calloc(SIZE, sizeof(int));
    int *devA, *devB, *devC;
    cudaMalloc((void**) &devA, SIZE*sizeof(int));
    cudaMalloc((void**) &devB, SIZE*sizeof(int));
    cudaMalloc((void**) &devC, SIZE*sizeof(int));

    dim3 gridDim(width/TILE_SIZE, width/TILE_SIZE);
    dim3 blockDim(TILE_SIZE, TILE_SIZE);

    cudaMemcpy(devA, hostA, SIZE*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(devB, hostB, SIZE*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(devC, hostC, SIZE*sizeof(int), cudaMemcpyHostToDevice);

    cudaEventRecord(start, 0); // start the timing
    MatrixMulShared<<<gridDim,blockDim>>>(devA, devB, devC, width); // the actual computation
    cudaEventRecord(stop, 0);  // stop the timing
    cudaEventSynchronize(stop); // synchronize

    cudaMemcpy(hostC, devC, SIZE*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventElapsedTime(&time, start, stop);
    printf("Elapsed time: %f ms\n", time);

    cudaFree(devA); cudaFree(devB); cudaFree(devC);
    return hostC;
}

void oblicz(int Rozmiar)
{
    int rozmiar = Rozmiar;
    int *A = generatorMatrixa(rozmiar);
    int *B = generatorMatrixa(rozmiar); // initialize the input matrices, then run the multiplication
    printf("For matrices of size = %i ", rozmiar);
    int *C = matMul(A, B, rozmiar);
    //printf("\n");
    //printMat(C,m);
    free(A);
    free(B);
    free(C);
}

int main()
{
    // run the benchmark for a series of sizes
    int i;
    for (i = 0; i < MAX_SIZE; ++i)
        if (i % 100 == 0 && i != 0) { // skip the degenerate 0x0 launch
            oblicz(i);
        }
    return 0;
}
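/*
   A hypothetical host-side reference for MatrixMulShared above: it computes
   the same width x width integer product in row-major order so that small GPU
   results can be compared element by element. The function name is
   illustrative and not part of the original program.
*/
void matMulReference(const int *A, const int *B, int *C, int width)
{
    for (int row = 0; row < width; row++) {
        for (int col = 0; col < width; col++) {
            int sum = 0;
            for (int k = 0; k < width; k++) {
                sum += A[row*width + k] * B[k*width + col];
            }
            C[row*width + col] = sum;   // same row-major layout the kernel writes
        }
    }
}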
384f973aff8a56958c86059bab570b5d2af5b5e2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "headers.h"

__global__ void convolution1D(const int *d_arr, const int *d_conv, int *d_result, int N, int M)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    int globalId = i*N + j;

    if (globalId < N) {
        int convSum = 0, cnum = 0, k = M/2;
        for (int i = -k; i <= k; i++) {
            if (globalId + i >= 0 && globalId + i < N && cnum < M) {
                convSum += d_arr[globalId + i]*d_conv[cnum];
            }
            cnum++;
        }
        d_result[globalId] = convSum;
    }
}

__global__ void convolution2D(const float *d_arr, const float *d_mask, float *d_result, int N)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    int globalId = i*N + j;

    if (i < N && j < N) {
        float avgSum = 0;
        int id, cnum = 0;
        for (int p = i-1; p <= i+1; p++) {
            for (int q = j-1; q <= j+1; q++) {
                if (p >= 0 && p < N && q >= 0 && q < N) {
                    id = p*N + q;
                    avgSum += d_arr[id]*d_mask[cnum];
                }
                cnum++;
            }
        }
        d_result[globalId] = avgSum;
    }
}
384f973aff8a56958c86059bab570b5d2af5b5e2.cu
#include "headers.h" __global__ void convolution1D(const int *d_arr, const int *d_conv, int *d_result, int N, int M) { int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int globalId = i*N + j; if(globalId < N) { int convSum = 0, cnum = 0, k = M/2; for(int i=-k; i<=k; i++) { if(globalId + i >= 0 && globalId + i < N && cnum < M) { convSum += d_arr[globalId + i]*d_conv[cnum]; } cnum++; } d_result[globalId] = convSum; } } __global__ void convolution2D(const float *d_arr, const float *d_mask, float *d_result, int N) { int i = blockIdx.x*blockDim.x + threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; int globalId = i*N + j; if(i < N && j< N) { float avgSum = 0; int id, cnum = 0; for(int p = i-1; p <= i+1; p++) { for(int q = j-1; q<= j+1; q++) { if(p >=0 && p < N && q>=0 && q < N) { id = p*N + q; avgSum += d_arr[id]*d_mask[cnum]; } cnum++; } } d_result[globalId] = avgSum; } }
42ca7239b7f136139d82b73b2f055db76a7bfca7.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #define CHECK(call)\ {\ const hipError_t error = call;\ if (error != hipSuccess)\ {\ printf("Error: %s:%d, ", __FILE__, __LINE__);\ printf("code: %d, reason: %s\n", error, hipGetErrorString(error));\ exit(1);\ }\ } void initialInt(int *ip, int size) { for (int i=0; i<size; i++) { ip[i] = i; } } void printMatrix(int *C, const int nx, const int ny) { int *ic = C; printf("\nMatrix: (%d.%d)\n", nx, ny); for (int iy=0; iy<ny; iy++) { for (int ix=0; ix<nx; ix++) { printf("%3d", ic[ix]); } ic += nx; printf("\n"); } printf("\n"); } __global__ void printThreadIndex(int *A, const int nx, const int ny) { int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * nx + ix; printf("thread_id: (%d, %d), block_id: (%d, %d), coordinate: (%d, %d) global index %2d ival %2d\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, A[idx]); } int main(int argc, char *argv[]) { printf("%s Starting...\n", argv[0]); // get device information. int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(hipSetDevice(dev)); // set matrix dimension. int nx = 8; int ny = 6; int nxy = nx * ny; int nBytes = nxy * sizeof(int); // malloc host memory. int *h_A; h_A = (int *)malloc(nBytes); // initialize host matrix. initialInt(h_A, nxy); printMatrix(h_A, nx, ny); // malloc gpu memory. int *d_matA; hipMalloc((void **)&d_matA, nBytes); // transfer data from host to gpu. hipMemcpy(d_matA, h_A, nBytes, hipMemcpyHostToDevice); // set up execution configuation. dim3 block(4, 2); dim3 grid( (nx + block.x - 1)/block.x, (ny + block.y - 1)/block.y); // invoke the kernel hipLaunchKernelGGL(( printThreadIndex) , dim3(grid), dim3(block), 0, 0, d_matA, nx, ny); hipDeviceSynchronize(); // free the memory. hipFree(d_matA); free(h_A); // reset the device. hipDeviceReset(); return 0; }
42ca7239b7f136139d82b73b2f055db76a7bfca7.cu
#include <cuda_runtime.h> #include <stdio.h> #define CHECK(call)\ {\ const cudaError_t error = call;\ if (error != cudaSuccess)\ {\ printf("Error: %s:%d, ", __FILE__, __LINE__);\ printf("code: %d, reason: %s\n", error, cudaGetErrorString(error));\ exit(1);\ }\ } void initialInt(int *ip, int size) { for (int i=0; i<size; i++) { ip[i] = i; } } void printMatrix(int *C, const int nx, const int ny) { int *ic = C; printf("\nMatrix: (%d.%d)\n", nx, ny); for (int iy=0; iy<ny; iy++) { for (int ix=0; ix<nx; ix++) { printf("%3d", ic[ix]); } ic += nx; printf("\n"); } printf("\n"); } __global__ void printThreadIndex(int *A, const int nx, const int ny) { int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; unsigned int idx = iy * nx + ix; printf("thread_id: (%d, %d), block_id: (%d, %d), coordinate: (%d, %d) global index %2d ival %2d\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, A[idx]); } int main(int argc, char *argv[]) { printf("%s Starting...\n", argv[0]); // get device information. int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); // set matrix dimension. int nx = 8; int ny = 6; int nxy = nx * ny; int nBytes = nxy * sizeof(int); // malloc host memory. int *h_A; h_A = (int *)malloc(nBytes); // initialize host matrix. initialInt(h_A, nxy); printMatrix(h_A, nx, ny); // malloc gpu memory. int *d_matA; cudaMalloc((void **)&d_matA, nBytes); // transfer data from host to gpu. cudaMemcpy(d_matA, h_A, nBytes, cudaMemcpyHostToDevice); // set up execution configuation. dim3 block(4, 2); dim3 grid( (nx + block.x - 1)/block.x, (ny + block.y - 1)/block.y); // invoke the kernel printThreadIndex <<<grid, block>>> (d_matA, nx, ny); cudaDeviceSynchronize(); // free the memory. cudaFree(d_matA); free(h_A); // reset the device. cudaDeviceReset(); return 0; }
1f025e9438c46e40070eb74261227beb99e77e7a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> // CUDA kernel to add elements of two arrays __global__ void add(int n, float *x, float *y) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } void printPtrInfo(char c, hipPointerAttribute_t attributes) { printf("\n\ninfo of %c\n", c); printf(" Memory type %i\n",attributes.memoryType); printf(" Type %i\n",attributes.type); printf(" Device %d\n",attributes.device); printf(" isManaged %d\n",attributes.isManaged); } int main(void) { int N = 1<<20; float *x, *y; // Allocate Unified Memory -- accessible from CPU or GPU hipMallocManaged(&x, N*sizeof(float), hipMemAttachHost); hipMemAdvise(x, N*sizeof(float), hipMemAdviseSetPreferredLocation, 0); hipMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // get device properties hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); printf("0 Unregistered\n1 Host\n2 Device\n3 Managed\n"); hipPointerAttribute_t attributes; printf("Pointer info\n"); hipPointerGetAttributes(&attributes, x); printPtrInfo('x', attributes); hipPointerGetAttributes(&attributes, y); printPtrInfo('y', attributes); // Launch kernel on 1M elements on the GPU int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory hipFree(x); hipFree(y); return 0; }
1f025e9438c46e40070eb74261227beb99e77e7a.cu
#include <iostream> #include <math.h> // CUDA kernel to add elements of two arrays __global__ void add(int n, float *x, float *y) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } void printPtrInfo(char c, cudaPointerAttributes attributes) { printf("\n\ninfo of %c\n", c); printf(" Memory type %i\n",attributes.memoryType); printf(" Type %i\n",attributes.type); printf(" Device %d\n",attributes.device); printf(" isManaged %d\n",attributes.isManaged); } int main(void) { int N = 1<<20; float *x, *y; // Allocate Unified Memory -- accessible from CPU or GPU cudaMallocManaged(&x, N*sizeof(float), cudaMemAttachHost); cudaMemAdvise(x, N*sizeof(float), cudaMemAdviseSetPreferredLocation, 0); cudaMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // get device properties cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); printf("0 Unregistered\n1 Host\n2 Device\n3 Managed\n"); cudaPointerAttributes attributes; printf("Pointer info\n"); cudaPointerGetAttributes(&attributes, x); printPtrInfo('x', attributes); cudaPointerGetAttributes(&attributes, y); printPtrInfo('y', attributes); // Launch kernel on 1M elements on the GPU int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; add<<<numBlocks, blockSize>>>(N, x, y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory cudaFree(x); cudaFree(y); return 0; }
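/*
   A small, optional variation on the managed-memory setup above: explicitly
   prefetching the buffers to the GPU before the kernel launch and moving y
   back to the CPU before it is read on the host. cudaCpuDeviceId is the
   standard constant naming the host as a prefetch target. This is an
   illustrative sketch, not part of the original program.
*/
void addWithPrefetch(int N, float *x, float *y)
{
    int device = 0;                                          // same device the original advises
    cudaMemPrefetchAsync(x, N * sizeof(float), device, 0);   // move x to the GPU
    cudaMemPrefetchAsync(y, N * sizeof(float), device, 0);   // move y to the GPU

    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    add<<<numBlocks, blockSize>>>(N, x, y);

    cudaMemPrefetchAsync(y, N * sizeof(float), cudaCpuDeviceId, 0);  // bring the result back
    cudaDeviceSynchronize();
}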
198810b57e64171347ff9db08fac07899221b02e.hip
// !!! This is a file automatically generated by hipify!!! #include<cuda.h> #include<stdio.h> #include<math.h> #include<cuda_runtime.h> #include<stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> #include <sys/time.h> __global__ void find_prime(int N,int* a,float* b,int* c) { //*p_size = s+1; //__shared__ cuda_count = 1; //__shared__ cuda_largest = 2; int i = blockIdx.x; //atomicAdd(&cuda_count,i); //__syncthreads(); //to find a[i] is prime or not int flag = 0; for(int j=3;j<=b[i];j=j+2) { if(a[i]%j==0) { flag = 1; c[i] = -1; break; } } } int* prime_numbers; float* sqrt_prime; int *cuda_prime; float *cuda_sqrt; int *is_prime; int *cuda_is_prime; int main(int argc,char *args[]) { if(argc!=3) { printf("./GPU_Prime -t Problem_Size\n"); return 0; } struct timeval time; int count = 1; int largest = 2; int problem_size = atoi(args[2]); printf("Problem Size %d\n",problem_size); int no_of_elements = 0; problem_size = problem_size - 2;//for 1 and 2 if(problem_size%2 == 0) { no_of_elements = problem_size/2; } else { no_of_elements = problem_size/2 + 1; } prime_numbers = (int *)malloc(no_of_elements*sizeof(int)); sqrt_prime = (float *)malloc(no_of_elements*sizeof(float)); is_prime = (int *)malloc(no_of_elements*sizeof(int)); int h = 3; for(int f=0;f<no_of_elements;f++) { prime_numbers[f] = h; sqrt_prime[f] = sqrt(h); is_prime[f] = 1; //printf("prime[%d] = %d sqrt[%d] = %f\n",f,prime_numbers[f],f,sqrt_prime[f]); h = h+2; } gettimeofday(&time,NULL); double t1 = time.tv_sec + (time.tv_usec/1000000.0); hipMalloc ( (void**)&cuda_prime, no_of_elements * sizeof (int) ); hipMalloc ( (void**)&cuda_sqrt, no_of_elements * sizeof (float) ); hipMalloc ( (void**)&cuda_is_prime, no_of_elements * sizeof (int) ); hipMemcpy( cuda_prime, prime_numbers, no_of_elements * sizeof(int), hipMemcpyHostToDevice); hipMemcpy( cuda_sqrt, sqrt_prime, no_of_elements * sizeof(float), hipMemcpyHostToDevice); //hipMemcpy( cuda_is_prime, is_prime, no_of_elements * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( find_prime), dim3(no_of_elements),dim3(1), 0, 0, no_of_elements,cuda_prime,cuda_sqrt,cuda_is_prime); hipMemcpy( is_prime, cuda_is_prime , no_of_elements * sizeof(int), hipMemcpyDeviceToHost); for(int g=0;g<no_of_elements;g++) { //printf("is_prime[%d] = %d number %d\n",g,is_prime[g],prime_numbers[g]); if(is_prime[g]!=-1) { count++; largest = prime_numbers[g]; } } printf("Count %d\n Largest %d\n",count,largest); gettimeofday(&time,NULL); double t2 = time.tv_sec + (time.tv_usec / 1000000.0); printf("Time Taken %f \n",t2-t1); hipFree(cuda_prime); hipFree(cuda_sqrt); hipFree(cuda_is_prime); }
198810b57e64171347ff9db08fac07899221b02e.cu
#include<cuda.h> #include<stdio.h> #include<math.h> #include<cuda_runtime.h> #include<stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cuda_runtime.h> #include <sys/time.h> __global__ void find_prime(int N,int* a,float* b,int* c) { //*p_size = s+1; //__shared__ cuda_count = 1; //__shared__ cuda_largest = 2; int i = blockIdx.x; //atomicAdd(&cuda_count,i); //__syncthreads(); //to find a[i] is prime or not int flag = 0; for(int j=3;j<=b[i];j=j+2) { if(a[i]%j==0) { flag = 1; c[i] = -1; break; } } } int* prime_numbers; float* sqrt_prime; int *cuda_prime; float *cuda_sqrt; int *is_prime; int *cuda_is_prime; int main(int argc,char *args[]) { if(argc!=3) { printf("./GPU_Prime -t Problem_Size\n"); return 0; } struct timeval time; int count = 1; int largest = 2; int problem_size = atoi(args[2]); printf("Problem Size %d\n",problem_size); int no_of_elements = 0; problem_size = problem_size - 2;//for 1 and 2 if(problem_size%2 == 0) { no_of_elements = problem_size/2; } else { no_of_elements = problem_size/2 + 1; } prime_numbers = (int *)malloc(no_of_elements*sizeof(int)); sqrt_prime = (float *)malloc(no_of_elements*sizeof(float)); is_prime = (int *)malloc(no_of_elements*sizeof(int)); int h = 3; for(int f=0;f<no_of_elements;f++) { prime_numbers[f] = h; sqrt_prime[f] = sqrt(h); is_prime[f] = 1; //printf("prime[%d] = %d sqrt[%d] = %f\n",f,prime_numbers[f],f,sqrt_prime[f]); h = h+2; } gettimeofday(&time,NULL); double t1 = time.tv_sec + (time.tv_usec/1000000.0); cudaMalloc ( (void**)&cuda_prime, no_of_elements * sizeof (int) ); cudaMalloc ( (void**)&cuda_sqrt, no_of_elements * sizeof (float) ); cudaMalloc ( (void**)&cuda_is_prime, no_of_elements * sizeof (int) ); cudaMemcpy( cuda_prime, prime_numbers, no_of_elements * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy( cuda_sqrt, sqrt_prime, no_of_elements * sizeof(float), cudaMemcpyHostToDevice); //cudaMemcpy( cuda_is_prime, is_prime, no_of_elements * sizeof(int), cudaMemcpyHostToDevice); find_prime<<<no_of_elements,1>>>(no_of_elements,cuda_prime,cuda_sqrt,cuda_is_prime); cudaMemcpy( is_prime, cuda_is_prime , no_of_elements * sizeof(int), cudaMemcpyDeviceToHost); for(int g=0;g<no_of_elements;g++) { //printf("is_prime[%d] = %d number %d\n",g,is_prime[g],prime_numbers[g]); if(is_prime[g]!=-1) { count++; largest = prime_numbers[g]; } } printf("Count %d\n Largest %d\n",count,largest); gettimeofday(&time,NULL); double t2 = time.tv_sec + (time.tv_usec / 1000000.0); printf("Time Taken %f \n",t2-t1); cudaFree(cuda_prime); cudaFree(cuda_sqrt); cudaFree(cuda_is_prime); }
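/*
   A hypothetical host-side cross-check for the GPU result above. In the code
   above the host-initialized is_prime array is never copied to the device
   (that cudaMemcpy is commented out), so an independent count is a useful
   sanity check. This counts 2 plus the odd primes up to problem_size by plain
   trial division; 1 is not counted. Illustrative only, for small problem
   sizes.
*/
int count_primes_reference(int problem_size)
{
    int count = (problem_size >= 2) ? 1 : 0;    /* the prime 2 */
    for (int n = 3; n <= problem_size; n += 2) {
        int is_p = 1;
        for (int d = 3; (long)d * d <= n; d += 2) {
            if (n % d == 0) { is_p = 0; break; }
        }
        if (is_p) {
            count++;
        }
    }
    return count;
}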
eadf77f634f4767cc7a7ee9cb50a98a01686f802.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" #include "device_launch_parameters.h" void checkCUDAErrorFn(const char *msg, const char *file, int line) { hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { int index = threadIdx.x + (blockDim.x * blockIdx.x); if (index >= n) return; bools[index] = idata[index] != 0 ? 1 : 0; } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { int index = threadIdx.x + (blockDim.x * blockIdx.x); if (index >= n) return; if (bools[index] == 1) odata[indices[index]] = idata[index]; } } }
eadf77f634f4767cc7a7ee9cb50a98a01686f802.cu
#include "common.h" #include "device_launch_parameters.h" void checkCUDAErrorFn(const char *msg, const char *file, int line) { cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { int index = threadIdx.x + (blockDim.x * blockIdx.x); if (index >= n) return; bools[index] = idata[index] != 0 ? 1 : 0; } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { int index = threadIdx.x + (blockDim.x * blockIdx.x); if (index >= n) return; if (bools[index] == 1) odata[indices[index]] = idata[index]; } } }
f4b7a97303b6679332dba147687a5120693b69ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <chrono> template <typename CmplxType> __global__ void degrid_kernel(CmplxType* __restrict out, const CmplxType* __restrict in, const size_t npts, const CmplxType* __restrict img, const size_t img_dim, const CmplxType* __restrict gcf) { const int blockIdx_x = blockIdx.x; const int blockDim_x = blockDim.x; const int threadIdx_x = threadIdx.x; const int gridDim_x = gridDim.x; const int blockDim_y = blockDim.y; const int threadIdx_y = threadIdx.y; for (int n = 32*blockIdx_x; n < npts; n += 32*gridDim_x) { for (int q = threadIdx_y; q < 32; q += blockDim_y) { CmplxType inn = in[n+q]; const int sub_x = floorf(GCF_GRID*(inn.x-floorf(inn.x))); const int sub_y = floorf(GCF_GRID*(inn.y-floorf(inn.y))); const int main_x = floorf(inn.x); const int main_y = floorf(inn.y); CmplxType sum = {0,0}; for(int a = threadIdx_x-GCF_DIM/2; a < GCF_DIM/2; a += blockDim_x) for(int b = -GCF_DIM/2; b < GCF_DIM/2; b++) { auto r1 = img[main_x+a+img_dim*(main_y+b)].x; auto i1 = img[main_x+a+img_dim*(main_y+b)].y; if (main_x+a < 0 || main_y+b < 0 || main_x+a >= img_dim || main_y+b >= img_dim) { r1 = i1 = 0; } auto r2 = gcf[GCF_DIM*GCF_DIM*(GCF_GRID*sub_y+sub_x) + GCF_DIM*b+a].x; auto i2 = gcf[GCF_DIM*GCF_DIM*(GCF_GRID*sub_y+sub_x) + GCF_DIM*b+a].y; sum.x += r1*r2 - i1*i2; sum.y += r1*i2 + r2*i1; } for(int s = blockDim_x < 16 ? blockDim_x : 16; s>0;s/=2) { sum.x += __shfl_down_sync(0xFFFFFFFF,sum.x,s); sum.y += __shfl_down_sync(0xFFFFFFFF,sum.y,s); } if (threadIdx_x == 0) { out[n+q] = sum; } } } } template <typename CmplxType> void degridGPU(CmplxType* out, CmplxType* in, CmplxType *img, CmplxType *gcf) { //degrid on the GPU // out (out) - the output values for each location // in (in) - the locations to be interpolated // img (in) - the image // gcf (in) - the gridding convolution function //img is padded to avoid overruns. Subtract to find the real head img -= IMG_SIZE*GCF_DIM+GCF_DIM; CmplxType *d_img; hipMalloc((void**)&d_img, sizeof(CmplxType)* (IMG_SIZE*IMG_SIZE+2*IMG_SIZE*GCF_DIM+2*GCF_DIM)); hipMemcpy(d_img, img, sizeof(CmplxType)* (IMG_SIZE*IMG_SIZE+2*IMG_SIZE*GCF_DIM+2*GCF_DIM), hipMemcpyHostToDevice); CmplxType *d_gcf; hipMalloc((void**)&d_gcf, sizeof(CmplxType)*64*GCF_DIM*GCF_DIM); hipMemcpy(d_gcf, gcf, sizeof(CmplxType)*64*GCF_DIM*GCF_DIM, hipMemcpyHostToDevice); CmplxType *d_out; hipMalloc((void**)&d_out, sizeof(CmplxType)*NPOINTS); CmplxType *d_in; hipMalloc((void**)&d_in, sizeof(CmplxType)*NPOINTS); hipMemcpy(d_in, in, sizeof(CmplxType)*NPOINTS, hipMemcpyHostToDevice); // NPOINTS is a multiple of 32 dim3 grid(NPOINTS/32, 1); dim3 block(32, 8); hipDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int n = 0; n < REPEAT; n++) { // GCF_DIM is at least 32 hipLaunchKernelGGL(( degrid_kernel), dim3(grid), dim3(block), 0, 0, d_out, d_in, NPOINTS, d_img + IMG_SIZE*GCF_DIM+GCF_DIM, IMG_SIZE, d_gcf + GCF_DIM*(GCF_DIM+1)/2); } hipDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); std::cout << "Average kernel execution time " << (time * 1e-9f) / REPEAT << " (s)\n"; hipMemcpy(out, d_out, sizeof(CmplxType)*NPOINTS, hipMemcpyDeviceToHost); hipFree(d_img); hipFree(d_in); hipFree(d_out); hipFree(d_gcf); }
f4b7a97303b6679332dba147687a5120693b69ff.cu
#include <chrono> template <typename CmplxType> __global__ void degrid_kernel(CmplxType* __restrict out, const CmplxType* __restrict in, const size_t npts, const CmplxType* __restrict img, const size_t img_dim, const CmplxType* __restrict gcf) { const int blockIdx_x = blockIdx.x; const int blockDim_x = blockDim.x; const int threadIdx_x = threadIdx.x; const int gridDim_x = gridDim.x; const int blockDim_y = blockDim.y; const int threadIdx_y = threadIdx.y; for (int n = 32*blockIdx_x; n < npts; n += 32*gridDim_x) { for (int q = threadIdx_y; q < 32; q += blockDim_y) { CmplxType inn = in[n+q]; const int sub_x = floorf(GCF_GRID*(inn.x-floorf(inn.x))); const int sub_y = floorf(GCF_GRID*(inn.y-floorf(inn.y))); const int main_x = floorf(inn.x); const int main_y = floorf(inn.y); CmplxType sum = {0,0}; for(int a = threadIdx_x-GCF_DIM/2; a < GCF_DIM/2; a += blockDim_x) for(int b = -GCF_DIM/2; b < GCF_DIM/2; b++) { auto r1 = img[main_x+a+img_dim*(main_y+b)].x; auto i1 = img[main_x+a+img_dim*(main_y+b)].y; if (main_x+a < 0 || main_y+b < 0 || main_x+a >= img_dim || main_y+b >= img_dim) { r1 = i1 = 0; } auto r2 = gcf[GCF_DIM*GCF_DIM*(GCF_GRID*sub_y+sub_x) + GCF_DIM*b+a].x; auto i2 = gcf[GCF_DIM*GCF_DIM*(GCF_GRID*sub_y+sub_x) + GCF_DIM*b+a].y; sum.x += r1*r2 - i1*i2; sum.y += r1*i2 + r2*i1; } for(int s = blockDim_x < 16 ? blockDim_x : 16; s>0;s/=2) { sum.x += __shfl_down_sync(0xFFFFFFFF,sum.x,s); sum.y += __shfl_down_sync(0xFFFFFFFF,sum.y,s); } if (threadIdx_x == 0) { out[n+q] = sum; } } } } template <typename CmplxType> void degridGPU(CmplxType* out, CmplxType* in, CmplxType *img, CmplxType *gcf) { //degrid on the GPU // out (out) - the output values for each location // in (in) - the locations to be interpolated // img (in) - the image // gcf (in) - the gridding convolution function //img is padded to avoid overruns. Subtract to find the real head img -= IMG_SIZE*GCF_DIM+GCF_DIM; CmplxType *d_img; cudaMalloc((void**)&d_img, sizeof(CmplxType)* (IMG_SIZE*IMG_SIZE+2*IMG_SIZE*GCF_DIM+2*GCF_DIM)); cudaMemcpy(d_img, img, sizeof(CmplxType)* (IMG_SIZE*IMG_SIZE+2*IMG_SIZE*GCF_DIM+2*GCF_DIM), cudaMemcpyHostToDevice); CmplxType *d_gcf; cudaMalloc((void**)&d_gcf, sizeof(CmplxType)*64*GCF_DIM*GCF_DIM); cudaMemcpy(d_gcf, gcf, sizeof(CmplxType)*64*GCF_DIM*GCF_DIM, cudaMemcpyHostToDevice); CmplxType *d_out; cudaMalloc((void**)&d_out, sizeof(CmplxType)*NPOINTS); CmplxType *d_in; cudaMalloc((void**)&d_in, sizeof(CmplxType)*NPOINTS); cudaMemcpy(d_in, in, sizeof(CmplxType)*NPOINTS, cudaMemcpyHostToDevice); // NPOINTS is a multiple of 32 dim3 grid(NPOINTS/32, 1); dim3 block(32, 8); cudaDeviceSynchronize(); auto start = std::chrono::steady_clock::now(); for (int n = 0; n < REPEAT; n++) { // GCF_DIM is at least 32 degrid_kernel<<<grid, block>>> (d_out, d_in, NPOINTS, d_img + IMG_SIZE*GCF_DIM+GCF_DIM, IMG_SIZE, d_gcf + GCF_DIM*(GCF_DIM+1)/2); } cudaDeviceSynchronize(); auto end = std::chrono::steady_clock::now(); auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); std::cout << "Average kernel execution time " << (time * 1e-9f) / REPEAT << " (s)\n"; cudaMemcpy(out, d_out, sizeof(CmplxType)*NPOINTS, cudaMemcpyDeviceToHost); cudaFree(d_img); cudaFree(d_in); cudaFree(d_out); cudaFree(d_gcf); }
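/*
   A hypothetical example of the compile-time configuration degridGPU above
   expects from the including translation unit. None of these macros or the
   element type are defined in this file; the values below are placeholders
   chosen only to satisfy the constraints noted in the code (NPOINTS a multiple
   of 32, GCF_DIM at least 32, and GCF_GRID*GCF_GRID = 64 to match the gcf
   allocation). In practice the defines would precede the #include of the file
   above, and CmplxType would be float2, which has the .x/.y members the
   kernels use.
*/
// #define GCF_DIM   32
// #define GCF_GRID  8
// #define IMG_SIZE  1024
// #define NPOINTS   1024
// #define REPEAT    10
// #include "degrid_kernel.cu"   // i.e., the file above, under whatever name the project uses
//
// The caller passes the unpadded image head, i.e. a pointer offset by
// IMG_SIZE*GCF_DIM + GCF_DIM into a buffer of
// IMG_SIZE*IMG_SIZE + 2*IMG_SIZE*GCF_DIM + 2*GCF_DIM elements:
//
//   float2 *out = new float2[NPOINTS];
//   float2 *in  = new float2[NPOINTS]();   // visibility coordinates inside the image bounds
//   float2 *img = new float2[IMG_SIZE*IMG_SIZE + 2*IMG_SIZE*GCF_DIM + 2*GCF_DIM]();
//   float2 *gcf = new float2[64*GCF_DIM*GCF_DIM]();
//   degridGPU(out, in, img + IMG_SIZE*GCF_DIM + GCF_DIM, gcf);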
269ebd0826dd28c0eab7f6e9b58b79dc20de8671.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @generated s Tue Dec 17 13:18:45 2013 */ #include "common_magma.h" #define inf_bs 32 #define max_bs 64 /* ====================================================================== */ /* inf-norm */ /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n % inf_bs == 0 and A is stored lower. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each. */ __global__ void slansy_inf_kernel_special_l( int n, const float* A, int lda, float *dwork ) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ float la[inf_bs][inf_bs+1]; A += ind; A += ty * lda; int break_d = blockIdx.x*inf_bs; // loop over all 32x32 blocks left of the diagonal block for(int i=0; i < break_d; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // compute 4 partial sums of each row #pragma unroll 8 for(int j=0; j < 8; j++) { res += fabsf( la[tx][j+ty*8] ); } A += lda*inf_bs; __syncthreads(); } // 32x4 threads cooperatively load 32x32 diagonal block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) la[ty+j][tx] = A[j*lda]; A += inf_bs; __syncthreads(); // symmetrize block // TODO make diagonal element real #pragma unroll 8 for(int i=ty*8; i < (1+ty)*inf_bs/4; i++) { if ( i < tx ) { la[tx][i] = la[i][tx]; } else la[tx][i] = la[tx][i]; // TODO: not needed } __syncthreads(); // compute 4 partial sums of each row #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf( la[tx][j+ty*8] ); } break_d += inf_bs; __syncthreads(); // loop over all 32x32 blocks below diagonal block for(int i=break_d; i < n; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) la[ty+j][tx] = A[j*lda]; A += inf_bs; __syncthreads(); // compute 4 partial sums of each row #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf( la[tx][j+ty*8] ); } __syncthreads(); } // store partial sums into shared memory la[tx][ty] = MAGMA_S_MAKE( res, 0. ); __syncthreads(); // 32x1 threads compute final result of each row if ( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); dwork[ind] = res; } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored lower */ __global__ void slansy_inf_kernel_generic_l( int n, const float* A, int lda, float *dwork, int n_full_block, int n_mod_bs ) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ float la[inf_bs][inf_bs+1]; if ( blockIdx.x == n_full_block ) { /************************************************************************ -- Last (partial) block -- -- We will do something unusual here -- Threads past end of matrix (i.e., ind >= n) are redundantly assigned -- the last row (n-1). At the end, those results are ignored -- only -- results for ind < n are saved into dwork. 
-- For sufficiently large matrix the overhead will be very low *************************************************************************/ if ( tx < n_mod_bs ) { A += ( blockIdx.x*inf_bs + tx ); } else { A += ( blockIdx.x*inf_bs + n_mod_bs - 1); // redundantly do last row } A += ty * lda; int break_d = blockIdx.x*inf_bs; /*---------------------------- Go Right -------------------------------*/ for(int i=0; i < break_d; i += inf_bs ) { #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8; j++) { res += fabsf( la[tx][j+ty*8] ); } A += lda*inf_bs; __syncthreads(); } /* we don't need to make results for rows >= n zero, as those computation will be discarded. */ if ( ty == 0 ) { /*-------------------------------------------- he will compute the triangular parts others will be waiting with values. -----------------------------------------------*/ int j; int count = 1; // TODO don't need initialization if ( tx < n_mod_bs ) count = tx; else count = n_mod_bs; for(j=0; j <= count; j++) { res += fabsf( A[j*lda] ); } A += tx*lda; count = 1; for( ; j < n_mod_bs; j++) { res += fabsf( A[count] ); count++; } } __syncthreads(); la[tx][ty]= MAGMA_S_MAKE( res, 0. ); __syncthreads(); /*-------------------------------------------------------- The leader accumulates all the results from his peer. ----------------------------------------------------------*/ if ( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); if ( tx < n_mod_bs ) dwork[ind] = res; } } else { /*----------------------------------- -- All the blocks but the last one -- -------------------------------------*/ A += ind; A += ty * lda; int break_d = blockIdx.x*inf_bs; /*---------------------------- Go Right -------------------------------*/ for(int i=0; i < break_d; i += inf_bs ) { #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8; j++) { res += fabsf( la[tx][j+ty*8] ); } A += lda*inf_bs; __syncthreads(); } /*------------------------------------ Diagonal Copy + Transpose lower triangle --------------------------------------*/ #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) la[ty+j][tx] = A[j*lda]; A += inf_bs; __syncthreads(); /*-------------------------------------------- Mirror Upper Triangle to Lower triangle ---------------------------------------------*/ #pragma unroll 8 for(int i=ty*8; i < (1+ty)*inf_bs/4; i++) { if ( i < tx ) { la[tx][i] = la[i][tx]; } else la[tx][i] = la[tx][i]; // TODO: not needed } __syncthreads(); /*-------------------------------- Do diagonal Computation -----------------------------------*/ #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf( la[tx][j+ty*8] ); } break_d += inf_bs; __syncthreads(); n -= n_mod_bs; /*----------------------------- Go Down -------------------------------*/ for(int i=break_d; i < n; i += inf_bs ) { #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) la[ty+j][tx] = A[j*lda]; A += inf_bs; __syncthreads(); #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf( la[tx][j+ty*8] ); } __syncthreads(); } /*--------------------------------------------- doing n_mod_bs stuffs here. Symmetric is giving us benefit .. 
true -----------------------------------------------*/ A -= tx; if ( tx < n_mod_bs ) { A += tx; } else { A += (n_mod_bs-1); /* Same as above */ } #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { if ( tx < n_mod_bs ) la[ty+j][tx] = A[j*lda]; //MAGMA_S_MUL( MAGMA_S_ONE, A[j*lda] ); // huh? just A[j*lda]? else la[ty+j][tx] = MAGMA_S_ZERO; //MAGMA_S_MUL( MAGMA_S_ZERO, A[j*lda] ); // huh? just 0? } __syncthreads(); /*---------------------------------------- What about doing some Zeroing here? instead of zeroing before? -----------------------------------------*/ #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf( la[tx][j+ty*8] ); } __syncthreads(); la[tx][ty] = MAGMA_S_MAKE( res, 0. ); __syncthreads(); /*-------------------------------------------------------- The leader accumulates all the results from his peer. ----------------------------------------------------------*/ if ( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored upper */ __global__ void slansy_inf_kernel_generic_u( int n, const float* A, int lda, float *dwork, int n_full_block, int n_mod_bs ) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ float la[inf_bs][inf_bs+1]; int blockIdxx = blockIdx.x; if ( blockIdx.x == n_full_block ) { /************************************************************************ -- Last block -- -- We will do something unusual here -- For sufficiently large matrix the overhead will be very low *************************************************************************/ ind = tx; A += lda*(n-1); if ( tx < n_mod_bs ) { A += tx; } else { A += (n_mod_bs - 1); } A -= ty * lda; int break_d = blockIdx.x*inf_bs; /*---------------------------- Go Right -------------------------------*/ for(int i=0; i < break_d; i += inf_bs ) { #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[-j*lda]; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8; j++) { res += fabsf( la[tx][j+ty*8] ); } A -= lda*inf_bs; __syncthreads(); } /* we don't need to make zero, as those computation will be discarded. */ if ( ty == 0 ) { /*-------------------------------------------- he will compute the triangular parts others will be waiting with values. -----------------------------------------------*/ int j; int count = 1; if ( tx < n_mod_bs ) count = n_mod_bs- tx; else count = n_mod_bs; for(j=0; j < count; j++) { res += fabsf( A[-j*lda] ); } A -= (count-1)*lda; count = 1; for( ; j < n_mod_bs; j++) { res += fabsf( A[-count] ); count++; } } else { } __syncthreads(); la[tx][ty] = MAGMA_S_MAKE( res, 0. ); __syncthreads(); /*-------------------------------------------------------- The leader accumulates all the results from his peer. ----------------------------------------------------------*/ if ( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); if ( tx < n_mod_bs ) dwork[ind] = res; } } else { /*----------------------------------- -- All the blocks but the last one -- -- By the way this code can be optimized more. 
-------------------------------------*/ ind = blockIdx.x*inf_bs + tx + n_mod_bs; const float *A1 = A; A += lda*(n-1); A += ind; A -= ty * lda; int break_d = (n/inf_bs - blockIdxx - 1)*inf_bs; /*---------------------------- Go Left -------------------------------*/ for(int i=0; i < break_d; i += inf_bs ) { #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[-j*lda]; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8; j++) { res += fabsf( la[tx][j+ty*8] ); } A -= lda*inf_bs; __syncthreads(); } /*------------------------------------ Diagonal Copy + Transpose lower triangle --------------------------------------*/ #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][31-ty-j] = A[ -j * lda]; } A -= inf_bs; __syncthreads(); /*-------------------------------------------- Mirror Upper Triangle to Lower triangle ---------------------------------------------*/ #pragma unroll 8 for(int i=ty*8; i < (1+ty)*inf_bs/4; i++) { if ( i < tx ) { la[tx][i] = la[i][tx]; } else { la[tx][i] = la[tx][i]; // TODO: not needed } } __syncthreads(); /*-------------------------------- Do diagonal Computation -----------------------------------*/ #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf( la[tx][j+ty*8] ); } break_d += inf_bs; __syncthreads(); n -= n_mod_bs; /*----------------------------- Go Up -------------------------------*/ int i; for( i=break_d; i < n; i += inf_bs ) { #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[- j * lda]; } A -= inf_bs; __syncthreads(); #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf ( la[31-tx][j+ty*8] ); } __syncthreads(); } /*--------------------------------------------- doing n_mod_bs stuffs here. Symmetric is giving us benefit .. true Do the other way please...... see slansy_inf_kernel_generic_l code above TODO compare performance with lower case and use that implementation if better. -----------------------------------------------*/ A1 = A1 + n_mod_bs*lda + tx*lda; if ( ty == 0 ) { for( int j = 0; j < n_mod_bs; j++) { res += fabsf( A1[ j + lda * blockIdx.x * inf_bs ] ); } } __syncthreads(); la[tx][ty]= MAGMA_S_MAKE( res, 0); __syncthreads(); /*-------------------------------------------------------- The leader accumulates all the results from his peer. ----------------------------------------------------------*/ if ( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n % inf_bs == 0 and A is stored upper */ __global__ void slansy_inf_kernel_special_u( int n, const float* A, int lda, float *dwork ) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int ind = blockIdx.x*inf_bs + tx; float res = 0.; /* Reverse Computation ... 
- Left - Triangle - Up */ A += lda*(n-1); __shared__ float la[inf_bs][inf_bs+1]; A += ind; A -= ty * lda; int break_d = (n / inf_bs - blockIdx.x-1 )*inf_bs; for(int i=0; i < break_d; i += inf_bs ) { #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[-j*lda]; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8; j++) { res += fabsf( la[tx][j+ty*8] ); } A -= lda*inf_bs; __syncthreads(); } #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) la[tx][31-ty-j] = A[ -j * lda]; /* Look at the indexing changes */ A -= inf_bs; __syncthreads(); #pragma unroll 8 for(int i=ty*8; i < (1+ty)*inf_bs/4; i++) { if ( i < tx ) { la[tx][i] = la[i][tx]; } else { la[tx][i] = la[tx][i]; // TODO: not needed } } __syncthreads(); #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf( la[tx][j+ty*8] ); } break_d += inf_bs; __syncthreads(); for(int i=break_d; i < n; i += inf_bs ) { #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) la[ty+j][tx] = A[ -j * lda]; A -= inf_bs; __syncthreads(); #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf( la[31-tx][j+ty*8] ); } __syncthreads(); } la[tx][ty]= MAGMA_S_MAKE( res, 0. ); __syncthreads(); if ( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); dwork[ind] = res; } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf */ extern "C" void slansy_inf( magma_uplo_t uplo, int n, const float *A, int lda, float *dwork ) { /* Note: The UPLO = 'U' Version can be optimized more. */ int blocks = (n - 1)/inf_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(inf_bs, 4, 1); if ( n % inf_bs == 0 ) { if ( uplo == 'L' || uplo == 'l') { hipLaunchKernelGGL(( slansy_inf_kernel_special_l), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda, dwork ); } else { hipLaunchKernelGGL(( slansy_inf_kernel_special_u), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda, dwork); } } else { int n_full_block = (n - n % inf_bs) /inf_bs; int n_mod_bs = n % inf_bs; if ( uplo == 'L' || uplo == 'l') { hipLaunchKernelGGL(( slansy_inf_kernel_generic_l), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda, dwork, n_full_block, n_mod_bs ); } else { hipLaunchKernelGGL(( slansy_inf_kernel_generic_u), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda, dwork, n_full_block, n_mod_bs ); } } } /* ====================================================================== */ /* max-norm */ /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored lower */ __global__ void slansy_max_kernel_l( int n, const float* A, int lda, float *dwork ) { int tx = threadIdx.x; int ind = blockIdx.x * max_bs + tx; float res = 0., res1; int break_d = blockIdx.x * max_bs; if (ind < n) { A += ind; // loop over blocks left of diagonal block for(int i=0; i < break_d; i += max_bs ) { #pragma unroll 8 for(int j=0; j < max_bs; j++) { res1 = fabsf( A[j*lda] ); res = fmax( res, res1 ); } A += lda*max_bs; } // process diagonal block for(int j=0; j <= tx; j++) { res1 = fabsf( A[j*lda] ); res = fmax( res, res1 ); } dwork[ind] = res; } } /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored upper. * TODO compare performance with lower case and use that implementation if better. 
*/ __global__ void slansy_max_kernel_u( int n, const float* A, int lda, float *dwork ) { int ind = blockIdx.x * max_bs + threadIdx.x; float res = 0.; A += ind; if (ind < n) { for(int j=n-1; j >= ind; j--) res = fmax( res, fabsf( A[j*lda] ) ); dwork[ind] = res; } } /* Computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max */ extern "C" void slansy_max( magma_uplo_t uplo, int n, const float *A, int lda, float *dwork ) { int blocks = (n - 1)/max_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(max_bs, 1, 1); if ( uplo == 'L' || uplo == 'l' ) { hipLaunchKernelGGL(( slansy_max_kernel_l), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda, dwork ); } else { hipLaunchKernelGGL(( slansy_max_kernel_u), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda, dwork ); } } /* ====================================================================== */ /* Purpose ======= SLANSY returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real symmetric matrix A. SLANSY = ( max(abs(A(i,j))), NORM = 'M' or 'm' ( ( norm1(A), NORM = '1', 'O' or 'o' ** supported only for CUDA_ARCH >= 200 ( ( normI(A), NORM = 'I' or 'i' ** supported only for CUDA_ARCH >= 200 ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' ** not yet supported where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Returns SLANSY < 0: if SLANSY = -i, the i-th argument had an illegal value. Arguments: ========== NORM (input) CHARACTER*1 Specifies the value to be returned in SLANSY as described above. UPLO (input) CHARACTER*1 Specifies whether the upper or lower triangular part of the symmetric matrix A is to be referenced. = 'U': Upper triangular part of A is referenced = 'L': Lower triangular part of A is referenced N (input) INTEGER The order of the matrix A. N >= 0. When N = 0, SLANSY is set to zero. A (input) REAL array on the GPU, dimension (LDA,N) The symmetric matrix A. If UPLO = 'U', the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(N,1). DWORK (workspace) REAL array on the GPU, dimension (MAX(1,LWORK)), where LWORK >= N. NOTE: this is different than LAPACK, where WORK is only required for norm1 and normI. */ extern "C" float magmablas_slansy( magma_norm_t norm, magma_uplo_t uplo, magma_int_t n, const float *A, magma_int_t lda, float *dwork ) { magma_int_t info = 0; magma_int_t arch = magma_getdevice_arch(); // 1-norm == inf-norm since A is symmetric bool inf_norm = (norm == 'I' || norm == 'i' || norm == '1' || norm == 'O' || norm == 'o'); bool max_norm = (norm == 'M' || norm == 'm'); if ( ! max_norm && (! 
inf_norm || arch < 200) ) info = -1; else if ( uplo != 'u' && uplo != 'U' && uplo != 'l' && uplo != 'L' ) info = -2; else if ( n < 0 ) info = -3; else if ( lda < n ) info = -5; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } /* Quick return */ if ( n == 0 ) return 0; float res = 0; if ( inf_norm ) { slansy_inf( uplo, n, A, lda, dwork ); int i = hipblasIsamax( n, dwork, 1 ) - 1; hipMemcpy( &res, &dwork[i], sizeof(float), hipMemcpyDeviceToHost ); } else if ( max_norm ) { slansy_max( uplo, n, A, lda, dwork ); int i = hipblasIsamax( n, dwork, 1 ) - 1; hipMemcpy( &res, &dwork[i], sizeof(float), hipMemcpyDeviceToHost ); } return res; }
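For a symmetric n-by-n matrix A, the quantities computed by the slansy kernels above (and described in the Purpose section) can be written compactly as

\[
\|A\|_\infty = \max_{0 \le i < n} \sum_{j=0}^{n-1} |a_{ij}|, \qquad
\|A\|_1 = \max_{0 \le j < n} \sum_{i=0}^{n-1} |a_{ij}| = \|A\|_\infty \quad (\text{since } A = A^T), \qquad
\|A\|_{\max} = \max_{i,j} |a_{ij}|.
\]

slansy_inf writes the i-th row sum into dwork[i] and slansy_max writes the i-th row maximum, so in either case the final hipblasIsamax / hipMemcpy step in magmablas_slansy simply picks out the largest entry of dwork.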
269ebd0826dd28c0eab7f6e9b58b79dc20de8671.cu
/* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @generated s Tue Dec 17 13:18:45 2013 */ #include "common_magma.h" #define inf_bs 32 #define max_bs 64 /* ====================================================================== */ /* inf-norm */ /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n % inf_bs == 0 and A is stored lower. * Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each. */ __global__ void slansy_inf_kernel_special_l( int n, const float* A, int lda, float *dwork ) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ float la[inf_bs][inf_bs+1]; A += ind; A += ty * lda; int break_d = blockIdx.x*inf_bs; // loop over all 32x32 blocks left of the diagonal block for(int i=0; i < break_d; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); // compute 4 partial sums of each row #pragma unroll 8 for(int j=0; j < 8; j++) { res += fabsf( la[tx][j+ty*8] ); } A += lda*inf_bs; __syncthreads(); } // 32x4 threads cooperatively load 32x32 diagonal block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) la[ty+j][tx] = A[j*lda]; A += inf_bs; __syncthreads(); // symmetrize block // TODO make diagonal element real #pragma unroll 8 for(int i=ty*8; i < (1+ty)*inf_bs/4; i++) { if ( i < tx ) { la[tx][i] = la[i][tx]; } else la[tx][i] = la[tx][i]; // TODO: not needed } __syncthreads(); // compute 4 partial sums of each row #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf( la[tx][j+ty*8] ); } break_d += inf_bs; __syncthreads(); // loop over all 32x32 blocks below diagonal block for(int i=break_d; i < n; i += inf_bs ) { // 32x4 threads cooperatively load 32x32 block #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) la[ty+j][tx] = A[j*lda]; A += inf_bs; __syncthreads(); // compute 4 partial sums of each row #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf( la[tx][j+ty*8] ); } __syncthreads(); } // store partial sums into shared memory la[tx][ty] = MAGMA_S_MAKE( res, 0. ); __syncthreads(); // 32x1 threads compute final result of each row if ( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); dwork[ind] = res; } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored lower */ __global__ void slansy_inf_kernel_generic_l( int n, const float* A, int lda, float *dwork, int n_full_block, int n_mod_bs ) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ float la[inf_bs][inf_bs+1]; if ( blockIdx.x == n_full_block ) { /************************************************************************ -- Last (partial) block -- -- We will do something unusual here -- Threads past end of matrix (i.e., ind >= n) are redundantly assigned -- the last row (n-1). At the end, those results are ignored -- only -- results for ind < n are saved into dwork. 
-- For sufficiently large matrix the overhead will be very low *************************************************************************/ if ( tx < n_mod_bs ) { A += ( blockIdx.x*inf_bs + tx ); } else { A += ( blockIdx.x*inf_bs + n_mod_bs - 1); // redundantly do last row } A += ty * lda; int break_d = blockIdx.x*inf_bs; /*---------------------------- Go Right -------------------------------*/ for(int i=0; i < break_d; i += inf_bs ) { #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8; j++) { res += fabsf( la[tx][j+ty*8] ); } A += lda*inf_bs; __syncthreads(); } /* we don't need to make results for rows >= n zero, as those computation will be discarded. */ if ( ty == 0 ) { /*-------------------------------------------- he will compute the triangular parts others will be waiting with values. -----------------------------------------------*/ int j; int count = 1; // TODO don't need initialization if ( tx < n_mod_bs ) count = tx; else count = n_mod_bs; for(j=0; j <= count; j++) { res += fabsf( A[j*lda] ); } A += tx*lda; count = 1; for( ; j < n_mod_bs; j++) { res += fabsf( A[count] ); count++; } } __syncthreads(); la[tx][ty]= MAGMA_S_MAKE( res, 0. ); __syncthreads(); /*-------------------------------------------------------- The leader accumulates all the results from his peer. ----------------------------------------------------------*/ if ( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); if ( tx < n_mod_bs ) dwork[ind] = res; } } else { /*----------------------------------- -- All the blocks but the last one -- -------------------------------------*/ A += ind; A += ty * lda; int break_d = blockIdx.x*inf_bs; /*---------------------------- Go Right -------------------------------*/ for(int i=0; i < break_d; i += inf_bs ) { #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[j*lda]; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8; j++) { res += fabsf( la[tx][j+ty*8] ); } A += lda*inf_bs; __syncthreads(); } /*------------------------------------ Diagonal Copy + Transpose lower triangle --------------------------------------*/ #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) la[ty+j][tx] = A[j*lda]; A += inf_bs; __syncthreads(); /*-------------------------------------------- Mirror Upper Triangle to Lower triangle ---------------------------------------------*/ #pragma unroll 8 for(int i=ty*8; i < (1+ty)*inf_bs/4; i++) { if ( i < tx ) { la[tx][i] = la[i][tx]; } else la[tx][i] = la[tx][i]; // TODO: not needed } __syncthreads(); /*-------------------------------- Do diagonal Computation -----------------------------------*/ #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf( la[tx][j+ty*8] ); } break_d += inf_bs; __syncthreads(); n -= n_mod_bs; /*----------------------------- Go Down -------------------------------*/ for(int i=break_d; i < n; i += inf_bs ) { #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) la[ty+j][tx] = A[j*lda]; A += inf_bs; __syncthreads(); #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf( la[tx][j+ty*8] ); } __syncthreads(); } /*--------------------------------------------- doing n_mod_bs stuffs here. Symmetric is giving us benefit .. 
true -----------------------------------------------*/ A -= tx; if ( tx < n_mod_bs ) { A += tx; } else { A += (n_mod_bs-1); /* Same as above */ } #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { if ( tx < n_mod_bs ) la[ty+j][tx] = A[j*lda]; //MAGMA_S_MUL( MAGMA_S_ONE, A[j*lda] ); // huh? just A[j*lda]? else la[ty+j][tx] = MAGMA_S_ZERO; //MAGMA_S_MUL( MAGMA_S_ZERO, A[j*lda] ); // huh? just 0? } __syncthreads(); /*---------------------------------------- What about doing some Zeroing here? instead of zeroing before? -----------------------------------------*/ #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf( la[tx][j+ty*8] ); } __syncthreads(); la[tx][ty] = MAGMA_S_MAKE( res, 0. ); __syncthreads(); /*-------------------------------------------------------- The leader accumulates all the results from his peer. ----------------------------------------------------------*/ if ( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n is any size and A is stored upper */ __global__ void slansy_inf_kernel_generic_u( int n, const float* A, int lda, float *dwork, int n_full_block, int n_mod_bs ) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int ind = blockIdx.x*inf_bs + tx; float res = 0.; __shared__ float la[inf_bs][inf_bs+1]; int blockIdxx = blockIdx.x; if ( blockIdx.x == n_full_block ) { /************************************************************************ -- Last block -- -- We will do something unusual here -- For sufficiently large matrix the overhead will be very low *************************************************************************/ ind = tx; A += lda*(n-1); if ( tx < n_mod_bs ) { A += tx; } else { A += (n_mod_bs - 1); } A -= ty * lda; int break_d = blockIdx.x*inf_bs; /*---------------------------- Go Right -------------------------------*/ for(int i=0; i < break_d; i += inf_bs ) { #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[-j*lda]; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8; j++) { res += fabsf( la[tx][j+ty*8] ); } A -= lda*inf_bs; __syncthreads(); } /* we don't need to make zero, as those computation will be discarded. */ if ( ty == 0 ) { /*-------------------------------------------- he will compute the triangular parts others will be waiting with values. -----------------------------------------------*/ int j; int count = 1; if ( tx < n_mod_bs ) count = n_mod_bs- tx; else count = n_mod_bs; for(j=0; j < count; j++) { res += fabsf( A[-j*lda] ); } A -= (count-1)*lda; count = 1; for( ; j < n_mod_bs; j++) { res += fabsf( A[-count] ); count++; } } else { } __syncthreads(); la[tx][ty] = MAGMA_S_MAKE( res, 0. ); __syncthreads(); /*-------------------------------------------------------- The leader accumulates all the results from his peer. ----------------------------------------------------------*/ if ( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); if ( tx < n_mod_bs ) dwork[ind] = res; } } else { /*----------------------------------- -- All the blocks but the last one -- -- By the way this code can be optimized more. 
-------------------------------------*/ ind = blockIdx.x*inf_bs + tx + n_mod_bs; const float *A1 = A; A += lda*(n-1); A += ind; A -= ty * lda; int break_d = (n/inf_bs - blockIdxx - 1)*inf_bs; /*---------------------------- Go Left -------------------------------*/ for(int i=0; i < break_d; i += inf_bs ) { #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[-j*lda]; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8; j++) { res += fabsf( la[tx][j+ty*8] ); } A -= lda*inf_bs; __syncthreads(); } /*------------------------------------ Diagonal Copy + Transpose lower triangle --------------------------------------*/ #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][31-ty-j] = A[ -j * lda]; } A -= inf_bs; __syncthreads(); /*-------------------------------------------- Mirror Upper Triangle to Lower triangle ---------------------------------------------*/ #pragma unroll 8 for(int i=ty*8; i < (1+ty)*inf_bs/4; i++) { if ( i < tx ) { la[tx][i] = la[i][tx]; } else { la[tx][i] = la[tx][i]; // TODO: not needed } } __syncthreads(); /*-------------------------------- Do diagonal Computation -----------------------------------*/ #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf( la[tx][j+ty*8] ); } break_d += inf_bs; __syncthreads(); n -= n_mod_bs; /*----------------------------- Go Up -------------------------------*/ int i; for( i=break_d; i < n; i += inf_bs ) { #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[ty+j][tx] = A[- j * lda]; } A -= inf_bs; __syncthreads(); #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf ( la[31-tx][j+ty*8] ); } __syncthreads(); } /*--------------------------------------------- doing n_mod_bs stuffs here. Symmetric is giving us benefit .. true Do the other way please...... see slansy_inf_kernel_generic_l code above TODO compare performance with lower case and use that implementation if better. -----------------------------------------------*/ A1 = A1 + n_mod_bs*lda + tx*lda; if ( ty == 0 ) { for( int j = 0; j < n_mod_bs; j++) { res += fabsf( A1[ j + lda * blockIdx.x * inf_bs ] ); } } __syncthreads(); la[tx][ty]= MAGMA_S_MAKE( res, 0); __syncthreads(); /*-------------------------------------------------------- The leader accumulates all the results from his peer. ----------------------------------------------------------*/ if ( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); dwork[ind] = res; } } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf, * where n % inf_bs == 0 and A is stored upper */ __global__ void slansy_inf_kernel_special_u( int n, const float* A, int lda, float *dwork ) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int ind = blockIdx.x*inf_bs + tx; float res = 0.; /* Reverse Computation ... 
- Left - Triangle - Up */ A += lda*(n-1); __shared__ float la[inf_bs][inf_bs+1]; A += ind; A -= ty * lda; int break_d = (n / inf_bs - blockIdx.x-1 )*inf_bs; for(int i=0; i < break_d; i += inf_bs ) { #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) { la[tx][ty+j] = A[-j*lda]; } __syncthreads(); #pragma unroll 8 for(int j=0; j < 8; j++) { res += fabsf( la[tx][j+ty*8] ); } A -= lda*inf_bs; __syncthreads(); } #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) la[tx][31-ty-j] = A[ -j * lda]; /* Look at the indexing changes */ A -= inf_bs; __syncthreads(); #pragma unroll 8 for(int i=ty*8; i < (1+ty)*inf_bs/4; i++) { if ( i < tx ) { la[tx][i] = la[i][tx]; } else { la[tx][i] = la[tx][i]; // TODO: not needed } } __syncthreads(); #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf( la[tx][j+ty*8] ); } break_d += inf_bs; __syncthreads(); for(int i=break_d; i < n; i += inf_bs ) { #pragma unroll 8 for(int j=0; j < inf_bs; j += 4) la[ty+j][tx] = A[ -j * lda]; A -= inf_bs; __syncthreads(); #pragma unroll 8 for(int j=0; j < inf_bs/4; j++) { res += fabsf( la[31-tx][j+ty*8] ); } __syncthreads(); } la[tx][ty]= MAGMA_S_MAKE( res, 0. ); __syncthreads(); if ( ty == 0 ) { res = res + MAGMA_S_REAL( la[tx][1] ) + MAGMA_S_REAL( la[tx][2] ) + MAGMA_S_REAL( la[tx][3] ); dwork[ind] = res; } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf */ extern "C" void slansy_inf( magma_uplo_t uplo, int n, const float *A, int lda, float *dwork ) { /* Note: The UPLO = 'U' Version can be optimized more. */ int blocks = (n - 1)/inf_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(inf_bs, 4, 1); if ( n % inf_bs == 0 ) { if ( uplo == 'L' || uplo == 'l') { slansy_inf_kernel_special_l<<< grid, threads, 0, magma_stream >>> ( n, A, lda, dwork ); } else { slansy_inf_kernel_special_u<<< grid, threads, 0, magma_stream >>> ( n, A, lda, dwork); } } else { int n_full_block = (n - n % inf_bs) /inf_bs; int n_mod_bs = n % inf_bs; if ( uplo == 'L' || uplo == 'l') { slansy_inf_kernel_generic_l<<< grid, threads, 0, magma_stream >>> ( n, A, lda, dwork, n_full_block, n_mod_bs ); } else { slansy_inf_kernel_generic_u<<< grid, threads, 0, magma_stream >>> ( n, A, lda, dwork, n_full_block, n_mod_bs ); } } } /* ====================================================================== */ /* max-norm */ /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored lower */ __global__ void slansy_max_kernel_l( int n, const float* A, int lda, float *dwork ) { int tx = threadIdx.x; int ind = blockIdx.x * max_bs + tx; float res = 0., res1; int break_d = blockIdx.x * max_bs; if (ind < n) { A += ind; // loop over blocks left of diagonal block for(int i=0; i < break_d; i += max_bs ) { #pragma unroll 8 for(int j=0; j < max_bs; j++) { res1 = fabsf( A[j*lda] ); res = fmax( res, res1 ); } A += lda*max_bs; } // process diagonal block for(int j=0; j <= tx; j++) { res1 = fabsf( A[j*lda] ); res = fmax( res, res1 ); } dwork[ind] = res; } } /* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored upper. * TODO compare performance with lower case and use that implementation if better. 
*/ __global__ void slansy_max_kernel_u( int n, const float* A, int lda, float *dwork ) { int ind = blockIdx.x * max_bs + threadIdx.x; float res = 0.; A += ind; if (ind < n) { for(int j=n-1; j >= ind; j--) res = fmax( res, fabsf( A[j*lda] ) ); dwork[ind] = res; } } /* Computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max */ extern "C" void slansy_max( magma_uplo_t uplo, int n, const float *A, int lda, float *dwork ) { int blocks = (n - 1)/max_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(max_bs, 1, 1); if ( uplo == 'L' || uplo == 'l' ) { slansy_max_kernel_l<<< grid, threads, 0, magma_stream >>> ( n, A, lda, dwork ); } else { slansy_max_kernel_u<<< grid, threads, 0, magma_stream >>> ( n, A, lda, dwork ); } } /* ====================================================================== */ /* Purpose ======= SLANSY returns the value of the one norm, or the Frobenius norm, or the infinity norm, or the element of largest absolute value of a real symmetric matrix A. SLANSY = ( max(abs(A(i,j))), NORM = 'M' or 'm' ( ( norm1(A), NORM = '1', 'O' or 'o' ** supported only for CUDA_ARCH >= 200 ( ( normI(A), NORM = 'I' or 'i' ** supported only for CUDA_ARCH >= 200 ( ( normF(A), NORM = 'F', 'f', 'E' or 'e' ** not yet supported where norm1 denotes the one norm of a matrix (maximum column sum), normI denotes the infinity norm of a matrix (maximum row sum) and normF denotes the Frobenius norm of a matrix (square root of sum of squares). Note that max(abs(A(i,j))) is not a consistent matrix norm. Returns SLANSY < 0: if SLANSY = -i, the i-th argument had an illegal value. Arguments: ========== NORM (input) CHARACTER*1 Specifies the value to be returned in SLANSY as described above. UPLO (input) CHARACTER*1 Specifies whether the upper or lower triangular part of the symmetric matrix A is to be referenced. = 'U': Upper triangular part of A is referenced = 'L': Lower triangular part of A is referenced N (input) INTEGER The order of the matrix A. N >= 0. When N = 0, SLANSY is set to zero. A (input) REAL array on the GPU, dimension (LDA,N) The symmetric matrix A. If UPLO = 'U', the leading n by n upper triangular part of A contains the upper triangular part of the matrix A, and the strictly lower triangular part of A is not referenced. If UPLO = 'L', the leading n by n lower triangular part of A contains the lower triangular part of the matrix A, and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(N,1). DWORK (workspace) REAL array on the GPU, dimension (MAX(1,LWORK)), where LWORK >= N. NOTE: this is different than LAPACK, where WORK is only required for norm1 and normI. */ extern "C" float magmablas_slansy( magma_norm_t norm, magma_uplo_t uplo, magma_int_t n, const float *A, magma_int_t lda, float *dwork ) { magma_int_t info = 0; magma_int_t arch = magma_getdevice_arch(); // 1-norm == inf-norm since A is symmetric bool inf_norm = (norm == 'I' || norm == 'i' || norm == '1' || norm == 'O' || norm == 'o'); bool max_norm = (norm == 'M' || norm == 'm'); if ( ! max_norm && (! 
inf_norm || arch < 200) ) info = -1; else if ( uplo != 'u' && uplo != 'U' && uplo != 'l' && uplo != 'L' ) info = -2; else if ( n < 0 ) info = -3; else if ( lda < n ) info = -5; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return info; } /* Quick return */ if ( n == 0 ) return 0; float res = 0; if ( inf_norm ) { slansy_inf( uplo, n, A, lda, dwork ); int i = cublasIsamax( n, dwork, 1 ) - 1; cudaMemcpy( &res, &dwork[i], sizeof(float), cudaMemcpyDeviceToHost ); } else if ( max_norm ) { slansy_max( uplo, n, A, lda, dwork ); int i = cublasIsamax( n, dwork, 1 ) - 1; cudaMemcpy( &res, &dwork[i], sizeof(float), cudaMemcpyDeviceToHost ); } return res; }
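The routine expects both the matrix and the dwork workspace (length >= n, per the DWORK note above) to be resident on the GPU. Below is a minimal host-side sketch of computing the infinity norm with it, assuming the usual MAGMA 1.x host helpers magma_smalloc, magma_ssetmatrix and magma_free; those helper names, the magma.h include and the padding of the leading dimension are assumptions of this sketch, not requirements stated by magmablas_slansy itself.

#include "magma.h"

/* Illustrative only: copy an n x n symmetric matrix to the GPU and take its inf-norm. */
float slansy_inf_norm_example( magma_int_t n, const float *hA, magma_int_t lda )
{
    float *dA = NULL, *dwork = NULL;
    float nrm = 0.f;
    magma_int_t ldda = ((n + 31)/32)*32;           /* pad leading dimension (assumption, for alignment) */

    magma_smalloc( &dA,    ldda*n );               /* device matrix */
    magma_smalloc( &dwork, n      );               /* workspace: one row sum per row */
    magma_ssetmatrix( n, n, hA, lda, dA, ldda );   /* host -> device copy */

    nrm = magmablas_slansy( 'I', 'L', n, dA, ldda, dwork );  /* inf-norm, lower triangle referenced */

    magma_free( dwork );
    magma_free( dA );
    return nrm;
}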
ae4a116cbfd1f03c16df87527798f15175e05283.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.2) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date May 2015 @generated from magma_zmcsrcompressor_gpu.cu normal z -> d, Sun May 3 11:22:58 2015 @author Hartwig Anzt */ #include "common_magmasparse.h" #define BLOCK_SIZE1 256 #define BLOCK_SIZE2 1 // copy nonzeros into new structure __global__ void magma_dmcsrgpu_kernel1( int num_rows, double *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind, double *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind ){ int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ double zero = MAGMA_D_ZERO; int start = A_rowptr[ row ]; int new_location = start; int end = A_rowptr[ row+1 ]; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ // B_val[new_location] = A_val[j]; // B_colind[new_location] = A_colind[j]; new_location++; } } // this is not a correctr rowpointer! this is nn_z in this row! B_rowptr[ row ] = new_location-start; } } // generate a valid rowpointer __global__ void magma_dmcsrgpu_kernel2( int num_rows, magma_index_t *B_rowptr, magma_index_t *A_rowptr ){ int idx = blockIdx.x*blockDim.x+threadIdx.x; int j, nnz = 0; if( idx == 0 ){ A_rowptr[ 0 ] = nnz; for( j=0; j<num_rows; j++ ){ nnz+=B_rowptr[ j ]; A_rowptr[ j+1 ] = nnz; } } } // copy new structure into original matrix __global__ void magma_dmcsrgpu_kernel3( int num_rows, double *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind, magma_index_t *B2_rowptr, double *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind ){ int row = blockIdx.x*blockDim.x+threadIdx.x; int j, new_location; if(row<num_rows){ new_location = A_rowptr[ row ]; int start = B2_rowptr[ row ]; int end = B2_rowptr[ row+1 ]; double zero = MAGMA_D_ZERO; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ B_val[new_location] = A_val[j]; B_colind[new_location] = A_colind[j]; new_location++; } // A_val[ j ] = B_val[ j ]; // A_colind[ j ] = B_colind[ j ]; } } } /** Purpose ------- Removes zeros in a CSR matrix. This is a GPU implementation of the CSR compressor. Arguments --------- @param A magma_d_matrix* input/output matrix @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_daux ********************************************************************/ extern "C" magma_int_t magma_dmcsrcompressor_gpu( magma_d_matrix *A, magma_queue_t queue ) { magma_int_t info = 0; magma_d_matrix B={Magma_CSR}, B2={Magma_CSR}; magma_d_matrix dA={Magma_CSR}, CSRA={Magma_CSR}; magma_index_t *cputmp = NULL; if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) { CHECK( magma_index_malloc( &B.drow, A->num_rows + 1 )); CHECK( magma_index_malloc( &B2.drow, A->num_rows + 1 )); magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1 ); dim3 grid1( magma_ceildiv( A->num_rows, BLOCK_SIZE1 ) ); // copying the nonzeros into B and write in B.drow how many there are hipLaunchKernelGGL(( magma_dmcsrgpu_kernel1), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue , A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol ); // correct the row pointer dim3 grid2( 1, 1, 1); hipLaunchKernelGGL(( magma_dmcsrgpu_kernel2), dim3(grid2), dim3(BLOCK_SIZE2), 0, queue , A->num_rows, B.drow, A->drow ); // access the true number of nonzeros CHECK( magma_index_malloc_cpu( &cputmp, 1 )); magma_index_getvector( 1, A->row+(A->num_rows), 1, cputmp, 1 ); A->nnz = (magma_int_t) cputmp[0]; // reallocate with right size CHECK( magma_dmalloc( &B.dval, A->nnz )); CHECK( magma_index_malloc( &B.dcol, A->nnz )); // copy correct values back hipLaunchKernelGGL(( magma_dmcsrgpu_kernel3), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue , A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol ); magma_free( A->dcol ); magma_free( A->dval ); A->dcol = B.dcol; A->dval = B.dval; } else { magma_storage_t A_storage = A->storage_type; magma_location_t A_location = A->memory_location; CHECK( magma_dmconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue )); CHECK( magma_dmtransfer( *A, &dA, A->memory_location, Magma_DEV, queue )); CHECK( magma_dmcsrcompressor_gpu( &dA, queue )); magma_dmfree( &dA, queue ); magma_dmfree( A, queue ); CHECK( magma_dmtransfer( dA, &CSRA, Magma_DEV, A_location, queue )); CHECK( magma_dmconvert( CSRA, A, Magma_CSR, A_storage, queue )); magma_dmfree( &dA, queue ); magma_dmfree( &CSRA, queue ); } cleanup: magma_dmfree( &dA, queue ); magma_dmfree( &CSRA, queue ); magma_free( B2.drow ); magma_free( B.drow ); return info; }
ae4a116cbfd1f03c16df87527798f15175e05283.cu
/* -- MAGMA (version 1.6.2) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date May 2015 @generated from magma_zmcsrcompressor_gpu.cu normal z -> d, Sun May 3 11:22:58 2015 @author Hartwig Anzt */ #include "common_magmasparse.h" #define BLOCK_SIZE1 256 #define BLOCK_SIZE2 1 // copy nonzeros into new structure __global__ void magma_dmcsrgpu_kernel1( int num_rows, double *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind, double *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind ){ int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ double zero = MAGMA_D_ZERO; int start = A_rowptr[ row ]; int new_location = start; int end = A_rowptr[ row+1 ]; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ // B_val[new_location] = A_val[j]; // B_colind[new_location] = A_colind[j]; new_location++; } } // this is not a correctr rowpointer! this is nn_z in this row! B_rowptr[ row ] = new_location-start; } } // generate a valid rowpointer __global__ void magma_dmcsrgpu_kernel2( int num_rows, magma_index_t *B_rowptr, magma_index_t *A_rowptr ){ int idx = blockIdx.x*blockDim.x+threadIdx.x; int j, nnz = 0; if( idx == 0 ){ A_rowptr[ 0 ] = nnz; for( j=0; j<num_rows; j++ ){ nnz+=B_rowptr[ j ]; A_rowptr[ j+1 ] = nnz; } } } // copy new structure into original matrix __global__ void magma_dmcsrgpu_kernel3( int num_rows, double *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind, magma_index_t *B2_rowptr, double *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind ){ int row = blockIdx.x*blockDim.x+threadIdx.x; int j, new_location; if(row<num_rows){ new_location = A_rowptr[ row ]; int start = B2_rowptr[ row ]; int end = B2_rowptr[ row+1 ]; double zero = MAGMA_D_ZERO; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ B_val[new_location] = A_val[j]; B_colind[new_location] = A_colind[j]; new_location++; } // A_val[ j ] = B_val[ j ]; // A_colind[ j ] = B_colind[ j ]; } } } /** Purpose ------- Removes zeros in a CSR matrix. This is a GPU implementation of the CSR compressor. Arguments --------- @param A magma_d_matrix* input/output matrix @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_daux ********************************************************************/ extern "C" magma_int_t magma_dmcsrcompressor_gpu( magma_d_matrix *A, magma_queue_t queue ) { magma_int_t info = 0; magma_d_matrix B={Magma_CSR}, B2={Magma_CSR}; magma_d_matrix dA={Magma_CSR}, CSRA={Magma_CSR}; magma_index_t *cputmp = NULL; if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) { CHECK( magma_index_malloc( &B.drow, A->num_rows + 1 )); CHECK( magma_index_malloc( &B2.drow, A->num_rows + 1 )); magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1 ); dim3 grid1( magma_ceildiv( A->num_rows, BLOCK_SIZE1 ) ); // copying the nonzeros into B and write in B.drow how many there are magma_dmcsrgpu_kernel1<<< grid1, BLOCK_SIZE1, 0, queue >>> ( A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol ); // correct the row pointer dim3 grid2( 1, 1, 1); magma_dmcsrgpu_kernel2<<< grid2, BLOCK_SIZE2, 0, queue >>> ( A->num_rows, B.drow, A->drow ); // access the true number of nonzeros CHECK( magma_index_malloc_cpu( &cputmp, 1 )); magma_index_getvector( 1, A->row+(A->num_rows), 1, cputmp, 1 ); A->nnz = (magma_int_t) cputmp[0]; // reallocate with right size CHECK( magma_dmalloc( &B.dval, A->nnz )); CHECK( magma_index_malloc( &B.dcol, A->nnz )); // copy correct values back magma_dmcsrgpu_kernel3<<< grid1, BLOCK_SIZE1, 0, queue >>> ( A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol ); magma_free( A->dcol ); magma_free( A->dval ); A->dcol = B.dcol; A->dval = B.dval; } else { magma_storage_t A_storage = A->storage_type; magma_location_t A_location = A->memory_location; CHECK( magma_dmconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue )); CHECK( magma_dmtransfer( *A, &dA, A->memory_location, Magma_DEV, queue )); CHECK( magma_dmcsrcompressor_gpu( &dA, queue )); magma_dmfree( &dA, queue ); magma_dmfree( A, queue ); CHECK( magma_dmtransfer( dA, &CSRA, Magma_DEV, A_location, queue )); CHECK( magma_dmconvert( CSRA, A, Magma_CSR, A_storage, queue )); magma_dmfree( &dA, queue ); magma_dmfree( &CSRA, queue ); } cleanup: magma_dmfree( &dA, queue ); magma_dmfree( &CSRA, queue ); magma_free( B2.drow ); magma_free( B.drow ); return info; }
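To see what the three kernels accomplish together: magma_dmcsrgpu_kernel1 counts the true nonzeros of each row (writing the per-row count into B.drow), kernel2 turns those counts into a valid row pointer, and kernel3 compacts the values and column indices into the new layout before the pointers are swapped into A. A small single-threaded C reference of the same compression, for illustration only (not part of MAGMA):

/* Compress a CSR matrix by dropping explicit zeros (CPU reference). */
void csr_compress_cpu( int num_rows,
                       const double *val, const int *rowptr, const int *colind,
                       double *new_val, int *new_rowptr, int *new_colind )
{
    int nnz = 0;
    new_rowptr[0] = 0;
    for (int row = 0; row < num_rows; ++row) {
        for (int j = rowptr[row]; j < rowptr[row+1]; ++j) {
            if (val[j] != 0.0) {          /* keep only true nonzeros */
                new_val[nnz]    = val[j];
                new_colind[nnz] = colind[j];
                nnz++;
            }
        }
        new_rowptr[row+1] = nnz;          /* running total = scan of the per-row counts */
    }
}

/* Example: row 0 = {1, 0, 2}, row 1 = {0, 0, 0}, row 2 = {3}
   val = {1,0,2, 0,0,0, 3}, rowptr = {0,3,6,7}
   compresses to val = {1,2,3}, rowptr = {0,2,2,3}. */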
53894b8f0169be3caaa2ad9ed18f2c908f913dbc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 1993-2016, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Compile: nvcc -arch=sm_52 -O3 -lcublas -lcurand -o LSTM LSTM.cu To enable/disable different performance options add the flat -DPERFOPTSx Where x is a bitmask defining the options used (see below). Run: ./LSTM or ./LSTM <seqLength> <numLayers> <hiddenSize> <miniBatch> Example (run on an NVIDIA M40): > ./LSTM Running with default settings seqLength 100, numLayers 4, hiddenSize 512, miniBatch 64 i checksum (example 0) 5.113463E+04 h checksum (example 0) 2.048000E+03 c checksum (example 0) 2.058137E+05 i checksum 3.272639E+06 c checksum 1.317278E+07 h checksum 1.310720E+05 Runtime 27.807743ms */ #include <stdio.h> #include <rocblas.h> #include <hiprand/hiprand.h> // Performance is not significantly different, but false saves memory. // False does not work with unfused pointwise ops. #define TRAINING (false) #ifndef PERFOPTS #define PERFOPTS (31) #endif #define GROUP_GEMM ((PERFOPTS & 1)) #define USE_STREAMS ((PERFOPTS & 2)) #define FUSE_PW ((PERFOPTS & 4)) #define PRE_TRANSPOSE ((PERFOPTS & 8)) #define RECUR_BATCH_SIZE (((PERFOPTS & 16) ? 2 : 1)) // Define some error checking macros. 
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(hipError_t stat, const char *file, int line) { if (stat != hipSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line); } } #define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); } void cublasErrCheck_(hipblasStatus_t stat, const char *file, int line) { if (stat != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line); } } #define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); } void curandErrCheck_(hiprandStatus_t stat, const char *file, int line) { if (stat != HIPRAND_STATUS_SUCCESS) { fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line); } } // Device functions __forceinline__ __device__ float sigmoidf(float in) { return 1.f / (1.f + expf(-in)); } // Pointwise functions __global__ void pw_biasAdd(float *y, float *bias, int n, int nBias) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] += bias[i % nBias]; } __global__ void pw_vecAdd(float *y, float *a, float *b, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = a[i] + b[i]; } __global__ void pw_vecMul(float *y, float *a, float *b, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = a[i] * b[i]; } __global__ void pw_tanh(float *y, float *a, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = tanh(a[i]); } __global__ void pw_sigmoid(float *y, float *a, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = sigmoidf(a[i]); } // Unfused LSTM (calling many pointwise kernels). int LSTM_elementwise_unfused( int hiddenSize, int miniBatch, float * __restrict__ tmp_h, float * __restrict__ tmp_i, float * __restrict__ bias, float * __restrict__ linearGates, float * __restrict__ h_data, float * __restrict__ i_data, float * __restrict__ c_in, float * __restrict__ c_out, bool training, hipStream_t stream) { dim3 blockDim; dim3 gridDim; int numElements = hiddenSize * miniBatch; blockDim.x = 128; gridDim.x = (numElements + blockDim.x - 1) / blockDim.x; for (int i = 0; i < 4; i++) { if (tmp_h != NULL) { hipLaunchKernelGGL(( pw_vecAdd) , dim3(gridDim), dim3(blockDim), 0, stream , tmp_i + i * numElements, tmp_i + i * numElements, tmp_h + i * numElements, numElements); cudaErrCheck(hipGetLastError()); } hipLaunchKernelGGL(( pw_biasAdd) , dim3(gridDim), dim3(blockDim), 0, stream , tmp_i + i * numElements, bias + i * hiddenSize, numElements, hiddenSize); cudaErrCheck(hipGetLastError()); hipLaunchKernelGGL(( pw_biasAdd) , dim3(gridDim), dim3(blockDim), 0, stream , tmp_i + i * numElements, bias + (i + 4) * hiddenSize, numElements, hiddenSize); cudaErrCheck(hipGetLastError()); if (training) { printf("LSTM_elementWise_unfused does not support training\n"); return 1; } } hipLaunchKernelGGL(( pw_sigmoid) , dim3(gridDim), dim3(blockDim), 0, stream , tmp_i + 0 * numElements, tmp_i + 0 * numElements, numElements); cudaErrCheck(hipGetLastError()); hipLaunchKernelGGL(( pw_sigmoid) , dim3(gridDim), dim3(blockDim), 0, stream , tmp_i + 1 * numElements, tmp_i + 1 * numElements, numElements); cudaErrCheck(hipGetLastError()); hipLaunchKernelGGL(( pw_tanh) , dim3(gridDim), dim3(blockDim), 0, stream , tmp_i + 2 * numElements, tmp_i + 2 * numElements, numElements); cudaErrCheck(hipGetLastError()); hipLaunchKernelGGL(( pw_sigmoid) , dim3(gridDim), dim3(blockDim), 0, stream , tmp_i + 3 * numElements, tmp_i + 3 * numElements, numElements); cudaErrCheck(hipGetLastError()); 
float *in_gate = tmp_i + 0 * numElements; float *forget_gate = tmp_i + 1 * numElements; float *in_gate2 = tmp_i + 2 * numElements; float *out_gate = tmp_i + 3 * numElements; if (c_in == NULL) { hipLaunchKernelGGL(( pw_vecMul) , dim3(gridDim), dim3(blockDim), 0, stream , in_gate, in_gate, in_gate2, numElements); cudaErrCheck(hipGetLastError()); } else { hipLaunchKernelGGL(( pw_vecMul) , dim3(gridDim), dim3(blockDim), 0, stream , forget_gate, forget_gate, c_in, numElements); cudaErrCheck(hipGetLastError()); hipLaunchKernelGGL(( pw_vecMul) , dim3(gridDim), dim3(blockDim), 0, stream , in_gate, in_gate, in_gate2, numElements); cudaErrCheck(hipGetLastError()); hipLaunchKernelGGL(( pw_vecAdd) , dim3(gridDim), dim3(blockDim), 0, stream , in_gate, in_gate, forget_gate, numElements); cudaErrCheck(hipGetLastError()); } if (c_out != NULL) { cudaErrCheck(hipMemcpyAsync(c_out, in_gate, numElements * sizeof(float), hipMemcpyDeviceToDevice, stream)); } hipLaunchKernelGGL(( pw_tanh) , dim3(gridDim), dim3(blockDim), 0, stream , in_gate, in_gate, numElements); cudaErrCheck(hipGetLastError()); hipLaunchKernelGGL(( pw_vecMul) , dim3(gridDim), dim3(blockDim), 0, stream , h_data, out_gate, in_gate, numElements); cudaErrCheck(hipGetLastError()); hipLaunchKernelGGL(( pw_vecMul) , dim3(gridDim), dim3(blockDim), 0, stream , i_data, out_gate, in_gate, numElements); cudaErrCheck(hipGetLastError()); return 0; } // Fused forward kernel __global__ void elementWise_fp(int hiddenSize, int miniBatch, float *tmp_h, float *tmp_i, float *bias, float *linearGates, float *h_out, float *i_out, float *c_in, float *c_out, bool training) { int index = blockIdx.x * blockDim.x + threadIdx.x; int numElements = miniBatch * hiddenSize; if (index >= numElements) return; int batch = index / hiddenSize; int gateIndex = (index % hiddenSize) + 4 * batch * hiddenSize; float g[4]; for (int i = 0; i < 4; i++) { g[i] = tmp_i[i * hiddenSize + gateIndex] + tmp_h[i * hiddenSize + gateIndex]; g[i] += bias[i * hiddenSize + index % hiddenSize] + bias[(i + 4) * hiddenSize + index % hiddenSize]; if (training) linearGates[gateIndex + i * hiddenSize] = g[i]; } float in_gate = sigmoidf(g[0]); float forget_gate = sigmoidf(g[1]); float in_gate2 = tanhf(g[2]); float out_gate = sigmoidf(g[3]); float val = (forget_gate * c_in[index]) + (in_gate * in_gate2); c_out[index] = val; val = out_gate * tanhf(val); h_out[index] = val; i_out[index] = val; } float LSTMTest(int hiddenSize, int miniBatch, int seqLength, int numLayers, bool checkF) { float *h_data; float *i_data; float *c_data; float *T; float *T_f; float *bias; float *tmp_h; float *tmp_i; float *linearGates; hipStream_t *stream_i; hipStream_t *stream_h; hipEvent_t **events_i; hipEvent_t **events_h; // Need a cuBLAS handle. hipblasHandle_t handle; cublasErrCheck(hipblasCreate(&handle)); // Allocate streams/events stream_i = (hipStream_t*)malloc(numLayers * sizeof(hipStream_t)); stream_h = (hipStream_t*)malloc(numLayers * sizeof(hipStream_t)); // If we don't want to use streams we can launch everything in to the NULL stream for (int i = 0; i < numLayers; i++) { if (USE_STREAMS) { cudaErrCheck(hipStreamCreate(&stream_i[i])); // Priority is empirical. 
cudaErrCheck(hipStreamCreateWithPriority(&stream_h[i], 0, -1)); } else { stream_i[i] = NULL; stream_h[i] = NULL; } } events_i = (hipEvent_t**)malloc(numLayers * sizeof(hipEvent_t*)); events_h = (hipEvent_t**)malloc(numLayers * sizeof(hipEvent_t*)); for (int i = 0; i < numLayers; i++) { events_i[i] = (hipEvent_t*)malloc(seqLength * sizeof(hipEvent_t)); events_h[i] = (hipEvent_t*)malloc(seqLength * sizeof(hipEvent_t)); } // Input/output data int numElements = hiddenSize * miniBatch; cudaErrCheck(hipMalloc((void**)&h_data, (seqLength + 1) * (numLayers) * numElements * sizeof(float))); cudaErrCheck(hipMalloc((void**)&i_data, (seqLength) * (numLayers + 1) * numElements * sizeof(float))); cudaErrCheck(hipMalloc((void**)&c_data, (seqLength + 1) * (numLayers) * numElements * sizeof(float))); cudaErrCheck(hipMalloc((void**)&T, numLayers * hiddenSize * hiddenSize * 8 * sizeof(float))); cudaErrCheck(hipMalloc((void**)&T_f, numLayers * hiddenSize * hiddenSize * 8 * sizeof(float))); cudaErrCheck(hipMalloc((void**)&bias, numLayers * hiddenSize * 8 * sizeof(float))); // Workspace cudaErrCheck(hipMalloc((void**)&tmp_h, 4 * numLayers * numElements * sizeof(float))); cudaErrCheck(hipMalloc((void**)&tmp_i, 4 * seqLength * numElements * sizeof(float))); // Activations if (TRAINING) { cudaErrCheck(hipMalloc((void**)&linearGates, 4 * seqLength * numLayers * numElements * sizeof(float))); } // Initialise with random values. hiprandGenerator_t rng; curandErrCheck(hiprandCreateGenerator(&rng, HIPRAND_RNG_PSEUDO_DEFAULT)); curandErrCheck(hiprandSetPseudoRandomGeneratorSeed(rng, 1337ull)); curandErrCheck(hiprandGenerateUniform(rng, h_data, (seqLength + 1) * (numLayers) * numElements)); curandErrCheck(hiprandGenerateUniform(rng, c_data, (seqLength + 1) * (numLayers) * numElements)); curandErrCheck(hiprandGenerateUniform(rng, i_data, (seqLength) * (numLayers + 1) * numElements)); curandErrCheck(hiprandGenerateUniform(rng, T, numLayers * hiddenSize * hiddenSize * 8)); curandErrCheck(hiprandGenerateUniform(rng, bias, numLayers * hiddenSize * 8)); curandErrCheck(hiprandDestroyGenerator(rng)); // Make sure everything is done before we start the timers cudaErrCheck(hipDeviceSynchronize()); // Timing starts here float elapsedTime; hipEvent_t start, stop; cudaErrCheck(hipEventCreate(&start)); cudaErrCheck(hipEventCreate(&stop)); cudaErrCheck(hipEventRecord(start)); float alpha = 1.f; float beta = 0.f; const hipblasOperation_t transa = (PRE_TRANSPOSE && (seqLength > 1)) ? 
HIPBLAS_OP_N : HIPBLAS_OP_T; const hipblasOperation_t transb = HIPBLAS_OP_N; // Optimization 4 if (transa == HIPBLAS_OP_N) { for (int layer = 0; layer < numLayers; layer++) { float *T_i_in = T + layer * hiddenSize * hiddenSize * 8; float *T_i_out = T_f + layer * hiddenSize * hiddenSize * 8; float *T_h_in = T + layer * hiddenSize * hiddenSize * 8 + hiddenSize * hiddenSize * 4; float *T_h_out = T_f + layer * hiddenSize * hiddenSize * 8 + hiddenSize * hiddenSize * 4; cublasErrCheck(hipblasSetStream(handle, stream_i[layer])); cublasErrCheck(hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, 4 * hiddenSize, hiddenSize, &alpha, T_i_in, hiddenSize, &beta, NULL, 4 * hiddenSize, T_i_out, 4 * hiddenSize)); cublasErrCheck(hipblasSetStream(handle, stream_h[layer])); cublasErrCheck(hipblasSgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, 4 * hiddenSize, hiddenSize, &alpha, T_h_in, hiddenSize, &beta, NULL, 4 * hiddenSize, T_h_out, 4 * hiddenSize)); } } else { T_f = T; } if (transb != HIPBLAS_OP_N) { printf("Only transb == HIPBLAS_OP_N supported\n"); return -1; } int lStart = 0; int lEnd = 0; int rStart = 0; int rEnd = 0; int recurBatchSize = RECUR_BATCH_SIZE; while (true) { // Many layer "scheduling". if (lEnd == 0) { lStart = 0; lEnd = 1; rStart = 0; } else { // Move "up" and "left" lStart++; lEnd++; rStart -= recurBatchSize; // Over the top or off the left, reset to layer 0 if (lEnd > numLayers || rStart < 0) { rStart += (lStart + 1) * recurBatchSize; lStart = 0; lEnd = 1; } // Off the right, step up while (rStart >= seqLength && lEnd <= numLayers) { lStart++; lEnd++; rStart -= recurBatchSize; } // Over the top or off the left, done! if (lEnd > numLayers || rStart < 0) { break; } } rEnd = rStart + recurBatchSize; if (rEnd > seqLength) rEnd = seqLength; for (int layer = lStart; layer < lEnd; layer++) { cublasErrCheck(hipblasSetStream(handle, stream_i[layer])); for (int i = rStart; i < rEnd; i++) { if (layer > 0) { cudaErrCheck(hipStreamWaitEvent(stream_i[layer], events_h[layer - 1][i], 0)); cudaErrCheck(hipEventDestroy(events_h[layer - 1][i])); } } // Optimization 1 if (GROUP_GEMM) { cublasErrCheck(hipblasSgemm(handle, transa, transb, 4 * hiddenSize, miniBatch * (rEnd - rStart), hiddenSize, &alpha, &T_f[layer * 8 * hiddenSize * hiddenSize], transa == HIPBLAS_OP_N ? 4 * hiddenSize : hiddenSize, i_data + rStart * numElements + layer * seqLength * numElements, hiddenSize, &beta, tmp_i + 4 * rStart * numElements, 4 * hiddenSize)); } else { for (int igemm =0; igemm < 4; igemm++) { cublasErrCheck(hipblasSgemm(handle, transa, transb, hiddenSize, miniBatch * (rEnd - rStart), hiddenSize, &alpha, &T_f[layer * 8 * hiddenSize * hiddenSize + igemm * hiddenSize], transa == HIPBLAS_OP_N ? 4 * hiddenSize : hiddenSize, i_data + rStart * numElements + layer * seqLength * numElements, hiddenSize, &beta, tmp_i + 4 * rStart * numElements + igemm * hiddenSize, 4 * hiddenSize)); } } for (int i = rStart; i < rEnd; i++) { cudaErrCheck(hipEventCreate(&events_i[layer][i], hipEventDisableTiming)); cudaErrCheck(hipEventRecord(events_i[layer][i], stream_i[layer])); } for (int i = rStart; i < rEnd; i++) { cublasErrCheck(hipblasSetStream(handle, stream_h[layer])); // Optimization 1 if (GROUP_GEMM) { cublasErrCheck(hipblasSgemm(handle, transa, transb, 4 * hiddenSize, miniBatch, hiddenSize, &alpha, &T_f[4 * hiddenSize * hiddenSize + layer * 8 * hiddenSize * hiddenSize], transa == HIPBLAS_OP_N ? 
4 * hiddenSize : hiddenSize, h_data + i * numElements + layer * (seqLength + 1) * numElements, hiddenSize, &beta, tmp_h + 4 * layer * numElements, 4 * hiddenSize)); } else { for (int igemm =0; igemm < 4; igemm++) { cublasErrCheck(hipblasSgemm(handle, transa, transb, hiddenSize, miniBatch, hiddenSize, &alpha, &T_f[4 * hiddenSize * hiddenSize + layer * 8 * hiddenSize * hiddenSize + igemm * hiddenSize], transa == HIPBLAS_OP_N ? 4 * hiddenSize : hiddenSize, h_data + i * numElements + layer * (seqLength + 1) * numElements, hiddenSize, &beta, tmp_h + 4 * layer * numElements + igemm * hiddenSize, 4 * hiddenSize)); } } cudaErrCheck(hipStreamWaitEvent(stream_h[layer], events_i[layer][i], 0)); cudaErrCheck(hipEventDestroy(events_i[layer][i])); // Optimization 3 if (FUSE_PW) { dim3 blockDim; dim3 gridDim; blockDim.x = 256; gridDim.x = (numElements + blockDim.x - 1) / blockDim.x; hipLaunchKernelGGL(( elementWise_fp) , dim3(gridDim), dim3(blockDim) , 0, stream_h[layer] , hiddenSize, miniBatch, tmp_h + 4 * layer * numElements, tmp_i + 4 * i * numElements, bias + 8 * layer * hiddenSize, TRAINING ? linearGates + 4 * (i * numElements + layer * seqLength * numElements) : NULL, h_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements, i_data + i * numElements + (layer + 1) * seqLength * numElements, c_data + i * numElements + layer * (seqLength + 1) * numElements, c_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements, TRAINING); cudaErrCheck(hipGetLastError()); } else { LSTM_elementwise_unfused(hiddenSize, miniBatch, tmp_h + 4 * layer * numElements, tmp_i + 4 * i * numElements, bias + 8 * layer * hiddenSize, TRAINING ? linearGates + 4 * (i * numElements + layer * seqLength * numElements) : NULL, h_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements, i_data + i * numElements + (layer + 1) * seqLength * numElements, c_data + i * numElements + layer * (seqLength + 1) * numElements, c_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements, TRAINING, stream_h[layer]); } if (layer != numLayers - 1) { cudaErrCheck(hipEventCreate(&events_h[layer][i], hipEventDisableTiming)); cudaErrCheck(hipEventRecord(events_h[layer][i], stream_h[layer])); } } } } cudaErrCheck(hipEventRecord(stop)); cudaErrCheck(hipEventSynchronize(stop)); cudaErrCheck(hipEventElapsedTime(&elapsedTime, start, stop)); cudaErrCheck(hipDeviceSynchronize()); // We're done. 
Print some checksums if (checkF) { float* testOutputi; float* testOutputh; float* testOutputc; int numElements = hiddenSize * miniBatch; testOutputi = (float*)malloc(numElements * seqLength * sizeof(float)); testOutputh = (float*)malloc(numElements * numLayers * sizeof(float)); testOutputc = (float*)malloc(numElements * numLayers * sizeof(float)); cudaErrCheck(hipMemcpy(testOutputi, i_data + numLayers * seqLength * numElements, seqLength * numElements * sizeof(float), hipMemcpyDeviceToHost)); for (int layer = 0; layer < numLayers; layer++) { cudaErrCheck(hipMemcpy(testOutputh + layer * numElements, h_data + seqLength * numElements + layer * (seqLength + 1) * numElements, numElements * sizeof(float), hipMemcpyDeviceToHost)); cudaErrCheck(hipMemcpy(testOutputc + layer * numElements, c_data + seqLength * numElements + layer * (seqLength + 1) * numElements, numElements * sizeof(float), hipMemcpyDeviceToHost)); } double checksumi = 0.; double checksumh = 0.; double checksumc = 0.; for (int m = 0; m < miniBatch; m++) { for (int j = 0; j < seqLength; j++) { for (int i = 0; i < hiddenSize; i++) { checksumi += testOutputi[j * numElements + m * hiddenSize + i]; if (hiddenSize <= 8) printf("i: (%d,%d): %E\n", j, i, testOutputi[j * numElements + m * hiddenSize + i]); } } for (int j = 0; j < numLayers; j++) { for (int i = 0; i < hiddenSize; i++) { checksumh += testOutputh[j * numElements + m * hiddenSize + i]; checksumc += testOutputc[j * numElements + m * hiddenSize + i]; } } if (m == 0) printf("i checksum (example %d) %E\n", m, checksumi); if (m == 0) printf("h checksum (example %d) %E\n", m, checksumh); if (m == 0) printf("c checksum (example %d) %E\n", m, checksumc); } printf("i checksum %E ", checksumi); printf("c checksum %E ", checksumc); printf("h checksum %E\n", checksumh); free(testOutputi); free(testOutputc); free(testOutputh); } cudaErrCheck(hipDeviceSynchronize()); cudaErrCheck(hipFree(h_data)); cudaErrCheck(hipFree(i_data)); cudaErrCheck(hipFree(c_data)); if (T != T_f) cudaErrCheck(hipFree(T)); cudaErrCheck(hipFree(T_f)); cudaErrCheck(hipFree(bias)); cudaErrCheck(hipFree(tmp_h)); cudaErrCheck(hipFree(tmp_i)); if (TRAINING) cudaErrCheck(hipFree(linearGates)); for (int i = 0; i < numLayers; i++) { if (stream_i[i] != NULL) cudaErrCheck(hipStreamDestroy(stream_i[i])); if (stream_h[i] != NULL) cudaErrCheck(hipStreamDestroy(stream_h[i])); } free(stream_i); free(stream_h); for (int i = 0; i < numLayers; i++) { free(events_i[i]); free(events_h[i]); } free(events_i); free(events_h); return elapsedTime; } int main(int argc, char* argv[]) { int seqLength; int numLayers; int hiddenSize; int miniBatch; if (argc == 5) { seqLength = atoi(argv[1]); numLayers = atoi(argv[2]); hiddenSize = atoi(argv[3]); miniBatch = atoi(argv[4]); } else if (argc == 1) { printf("Running with default settings\n"); seqLength = 100; numLayers = 4; hiddenSize = 512; miniBatch = 64; } else { printf("Usage: ./LSTM <seqLength> <numLayers> <hiddenSize> <miniBatch>\n"); return 1; } printf("seqLength %d, numLayers %d, hiddenSize %d, miniBatch %d\n", seqLength, numLayers, hiddenSize, miniBatch); int numRuns = 1; float totalTime = 0.f; for (int run = 0; run < numRuns; run++) { totalTime += LSTMTest(hiddenSize, miniBatch, seqLength, numLayers, true); } printf("Runtime %fms\n", totalTime / numRuns); return time < 0; }
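The hipified LSTM above overlaps its input GEMMs and recurrent GEMMs on separate streams and orders them with events (hipEventRecord on the producing stream, hipStreamWaitEvent on the consuming one). Below is a minimal, self-contained sketch of that ordering pattern, written in plain CUDA for brevity (the HIP calls are direct analogues); producerKernel, consumerKernel and crossStreamDependencyDemo are illustrative names, not identifiers from the file above.

#include <cuda_runtime.h>

__global__ void producerKernel(float *x) { x[threadIdx.x] = threadIdx.x; }
__global__ void consumerKernel(float *x) { x[threadIdx.x] += 1.0f; }

int crossStreamDependencyDemo() {
    float *d;
    cudaMalloc(&d, 256 * sizeof(float));

    cudaStream_t streamA, streamB;
    cudaStreamCreate(&streamA);
    cudaStreamCreate(&streamB);

    cudaEvent_t ready;
    // Timing is disabled because the event is only used for ordering, as in the file above.
    cudaEventCreateWithFlags(&ready, cudaEventDisableTiming);

    producerKernel<<<1, 256, 0, streamA>>>(d);   // producer runs on streamA
    cudaEventRecord(ready, streamA);             // mark completion of the producer
    cudaStreamWaitEvent(streamB, ready, 0);      // streamB waits without blocking the host
    consumerKernel<<<1, 256, 0, streamB>>>(d);   // safe to consume the producer's output

    cudaStreamSynchronize(streamB);
    cudaEventDestroy(ready);
    cudaStreamDestroy(streamA);
    cudaStreamDestroy(streamB);
    cudaFree(d);
    return 0;
}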
53894b8f0169be3caaa2ad9ed18f2c908f913dbc.cu
/* Copyright (c) 1993-2016, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Compile: nvcc -arch=sm_52 -O3 -lcublas -lcurand -o LSTM LSTM.cu To enable/disable different performance options add the flat -DPERFOPTSx Where x is a bitmask defining the options used (see below). Run: ./LSTM or ./LSTM <seqLength> <numLayers> <hiddenSize> <miniBatch> Example (run on an NVIDIA M40): > ./LSTM Running with default settings seqLength 100, numLayers 4, hiddenSize 512, miniBatch 64 i checksum (example 0) 5.113463E+04 h checksum (example 0) 2.048000E+03 c checksum (example 0) 2.058137E+05 i checksum 3.272639E+06 c checksum 1.317278E+07 h checksum 1.310720E+05 Runtime 27.807743ms */ #include <stdio.h> #include <cublas_v2.h> #include <curand.h> // Performance is not significantly different, but false saves memory. // False does not work with unfused pointwise ops. #define TRAINING (false) #ifndef PERFOPTS #define PERFOPTS (31) #endif #define GROUP_GEMM ((PERFOPTS & 1)) #define USE_STREAMS ((PERFOPTS & 2)) #define FUSE_PW ((PERFOPTS & 4)) #define PRE_TRANSPOSE ((PERFOPTS & 8)) #define RECUR_BATCH_SIZE (((PERFOPTS & 16) ? 2 : 1)) // Define some error checking macros. 
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); } void cudaErrCheck_(cudaError_t stat, const char *file, int line) { if (stat != cudaSuccess) { fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line); } } #define cublasErrCheck(stat) { cublasErrCheck_((stat), __FILE__, __LINE__); } void cublasErrCheck_(cublasStatus_t stat, const char *file, int line) { if (stat != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "cuBLAS Error: %d %s %d\n", stat, file, line); } } #define curandErrCheck(stat) { curandErrCheck_((stat), __FILE__, __LINE__); } void curandErrCheck_(curandStatus_t stat, const char *file, int line) { if (stat != CURAND_STATUS_SUCCESS) { fprintf(stderr, "cuRand Error: %d %s %d\n", stat, file, line); } } // Device functions __forceinline__ __device__ float sigmoidf(float in) { return 1.f / (1.f + expf(-in)); } // Pointwise functions __global__ void pw_biasAdd(float *y, float *bias, int n, int nBias) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] += bias[i % nBias]; } __global__ void pw_vecAdd(float *y, float *a, float *b, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = a[i] + b[i]; } __global__ void pw_vecMul(float *y, float *a, float *b, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = a[i] * b[i]; } __global__ void pw_tanh(float *y, float *a, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = tanh(a[i]); } __global__ void pw_sigmoid(float *y, float *a, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[i] = sigmoidf(a[i]); } // Unfused LSTM (calling many pointwise kernels). int LSTM_elementwise_unfused( int hiddenSize, int miniBatch, float * __restrict__ tmp_h, float * __restrict__ tmp_i, float * __restrict__ bias, float * __restrict__ linearGates, float * __restrict__ h_data, float * __restrict__ i_data, float * __restrict__ c_in, float * __restrict__ c_out, bool training, cudaStream_t stream) { dim3 blockDim; dim3 gridDim; int numElements = hiddenSize * miniBatch; blockDim.x = 128; gridDim.x = (numElements + blockDim.x - 1) / blockDim.x; for (int i = 0; i < 4; i++) { if (tmp_h != NULL) { pw_vecAdd <<< gridDim, blockDim, 0, stream >>> (tmp_i + i * numElements, tmp_i + i * numElements, tmp_h + i * numElements, numElements); cudaErrCheck(cudaGetLastError()); } pw_biasAdd <<< gridDim, blockDim, 0, stream >>> (tmp_i + i * numElements, bias + i * hiddenSize, numElements, hiddenSize); cudaErrCheck(cudaGetLastError()); pw_biasAdd <<< gridDim, blockDim, 0, stream >>> (tmp_i + i * numElements, bias + (i + 4) * hiddenSize, numElements, hiddenSize); cudaErrCheck(cudaGetLastError()); if (training) { printf("LSTM_elementWise_unfused does not support training\n"); return 1; } } pw_sigmoid <<< gridDim, blockDim, 0, stream >>> (tmp_i + 0 * numElements, tmp_i + 0 * numElements, numElements); cudaErrCheck(cudaGetLastError()); pw_sigmoid <<< gridDim, blockDim, 0, stream >>> (tmp_i + 1 * numElements, tmp_i + 1 * numElements, numElements); cudaErrCheck(cudaGetLastError()); pw_tanh <<< gridDim, blockDim, 0, stream >>> (tmp_i + 2 * numElements, tmp_i + 2 * numElements, numElements); cudaErrCheck(cudaGetLastError()); pw_sigmoid <<< gridDim, blockDim, 0, stream >>> (tmp_i + 3 * numElements, tmp_i + 3 * numElements, numElements); cudaErrCheck(cudaGetLastError()); float *in_gate = tmp_i + 0 * numElements; float *forget_gate = tmp_i + 1 * numElements; float *in_gate2 = tmp_i + 2 * numElements; float *out_gate = tmp_i + 3 * numElements; if (c_in == NULL) { 
pw_vecMul <<< gridDim, blockDim, 0, stream >>> (in_gate, in_gate, in_gate2, numElements); cudaErrCheck(cudaGetLastError()); } else { pw_vecMul <<< gridDim, blockDim, 0, stream >>> (forget_gate, forget_gate, c_in, numElements); cudaErrCheck(cudaGetLastError()); pw_vecMul <<< gridDim, blockDim, 0, stream >>> (in_gate, in_gate, in_gate2, numElements); cudaErrCheck(cudaGetLastError()); pw_vecAdd <<< gridDim, blockDim, 0, stream >>> (in_gate, in_gate, forget_gate, numElements); cudaErrCheck(cudaGetLastError()); } if (c_out != NULL) { cudaErrCheck(cudaMemcpyAsync(c_out, in_gate, numElements * sizeof(float), cudaMemcpyDeviceToDevice, stream)); } pw_tanh <<< gridDim, blockDim, 0, stream >>> (in_gate, in_gate, numElements); cudaErrCheck(cudaGetLastError()); pw_vecMul <<< gridDim, blockDim, 0, stream >>> (h_data, out_gate, in_gate, numElements); cudaErrCheck(cudaGetLastError()); pw_vecMul <<< gridDim, blockDim, 0, stream >>> (i_data, out_gate, in_gate, numElements); cudaErrCheck(cudaGetLastError()); return 0; } // Fused forward kernel __global__ void elementWise_fp(int hiddenSize, int miniBatch, float *tmp_h, float *tmp_i, float *bias, float *linearGates, float *h_out, float *i_out, float *c_in, float *c_out, bool training) { int index = blockIdx.x * blockDim.x + threadIdx.x; int numElements = miniBatch * hiddenSize; if (index >= numElements) return; int batch = index / hiddenSize; int gateIndex = (index % hiddenSize) + 4 * batch * hiddenSize; float g[4]; for (int i = 0; i < 4; i++) { g[i] = tmp_i[i * hiddenSize + gateIndex] + tmp_h[i * hiddenSize + gateIndex]; g[i] += bias[i * hiddenSize + index % hiddenSize] + bias[(i + 4) * hiddenSize + index % hiddenSize]; if (training) linearGates[gateIndex + i * hiddenSize] = g[i]; } float in_gate = sigmoidf(g[0]); float forget_gate = sigmoidf(g[1]); float in_gate2 = tanhf(g[2]); float out_gate = sigmoidf(g[3]); float val = (forget_gate * c_in[index]) + (in_gate * in_gate2); c_out[index] = val; val = out_gate * tanhf(val); h_out[index] = val; i_out[index] = val; } float LSTMTest(int hiddenSize, int miniBatch, int seqLength, int numLayers, bool checkF) { float *h_data; float *i_data; float *c_data; float *T; float *T_f; float *bias; float *tmp_h; float *tmp_i; float *linearGates; cudaStream_t *stream_i; cudaStream_t *stream_h; cudaEvent_t **events_i; cudaEvent_t **events_h; // Need a cuBLAS handle. cublasHandle_t handle; cublasErrCheck(cublasCreate(&handle)); // Allocate streams/events stream_i = (cudaStream_t*)malloc(numLayers * sizeof(cudaStream_t)); stream_h = (cudaStream_t*)malloc(numLayers * sizeof(cudaStream_t)); // If we don't want to use streams we can launch everything in to the NULL stream for (int i = 0; i < numLayers; i++) { if (USE_STREAMS) { cudaErrCheck(cudaStreamCreate(&stream_i[i])); // Priority is empirical. 
cudaErrCheck(cudaStreamCreateWithPriority(&stream_h[i], 0, -1)); } else { stream_i[i] = NULL; stream_h[i] = NULL; } } events_i = (cudaEvent_t**)malloc(numLayers * sizeof(cudaEvent_t*)); events_h = (cudaEvent_t**)malloc(numLayers * sizeof(cudaEvent_t*)); for (int i = 0; i < numLayers; i++) { events_i[i] = (cudaEvent_t*)malloc(seqLength * sizeof(cudaEvent_t)); events_h[i] = (cudaEvent_t*)malloc(seqLength * sizeof(cudaEvent_t)); } // Input/output data int numElements = hiddenSize * miniBatch; cudaErrCheck(cudaMalloc((void**)&h_data, (seqLength + 1) * (numLayers) * numElements * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&i_data, (seqLength) * (numLayers + 1) * numElements * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&c_data, (seqLength + 1) * (numLayers) * numElements * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&T, numLayers * hiddenSize * hiddenSize * 8 * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&T_f, numLayers * hiddenSize * hiddenSize * 8 * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&bias, numLayers * hiddenSize * 8 * sizeof(float))); // Workspace cudaErrCheck(cudaMalloc((void**)&tmp_h, 4 * numLayers * numElements * sizeof(float))); cudaErrCheck(cudaMalloc((void**)&tmp_i, 4 * seqLength * numElements * sizeof(float))); // Activations if (TRAINING) { cudaErrCheck(cudaMalloc((void**)&linearGates, 4 * seqLength * numLayers * numElements * sizeof(float))); } // Initialise with random values. curandGenerator_t rng; curandErrCheck(curandCreateGenerator(&rng, CURAND_RNG_PSEUDO_DEFAULT)); curandErrCheck(curandSetPseudoRandomGeneratorSeed(rng, 1337ull)); curandErrCheck(curandGenerateUniform(rng, h_data, (seqLength + 1) * (numLayers) * numElements)); curandErrCheck(curandGenerateUniform(rng, c_data, (seqLength + 1) * (numLayers) * numElements)); curandErrCheck(curandGenerateUniform(rng, i_data, (seqLength) * (numLayers + 1) * numElements)); curandErrCheck(curandGenerateUniform(rng, T, numLayers * hiddenSize * hiddenSize * 8)); curandErrCheck(curandGenerateUniform(rng, bias, numLayers * hiddenSize * 8)); curandErrCheck(curandDestroyGenerator(rng)); // Make sure everything is done before we start the timers cudaErrCheck(cudaDeviceSynchronize()); // Timing starts here float elapsedTime; cudaEvent_t start, stop; cudaErrCheck(cudaEventCreate(&start)); cudaErrCheck(cudaEventCreate(&stop)); cudaErrCheck(cudaEventRecord(start)); float alpha = 1.f; float beta = 0.f; const cublasOperation_t transa = (PRE_TRANSPOSE && (seqLength > 1)) ? 
CUBLAS_OP_N : CUBLAS_OP_T; const cublasOperation_t transb = CUBLAS_OP_N; // Optimization 4 if (transa == CUBLAS_OP_N) { for (int layer = 0; layer < numLayers; layer++) { float *T_i_in = T + layer * hiddenSize * hiddenSize * 8; float *T_i_out = T_f + layer * hiddenSize * hiddenSize * 8; float *T_h_in = T + layer * hiddenSize * hiddenSize * 8 + hiddenSize * hiddenSize * 4; float *T_h_out = T_f + layer * hiddenSize * hiddenSize * 8 + hiddenSize * hiddenSize * 4; cublasErrCheck(cublasSetStream(handle, stream_i[layer])); cublasErrCheck(cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, 4 * hiddenSize, hiddenSize, &alpha, T_i_in, hiddenSize, &beta, NULL, 4 * hiddenSize, T_i_out, 4 * hiddenSize)); cublasErrCheck(cublasSetStream(handle, stream_h[layer])); cublasErrCheck(cublasSgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, 4 * hiddenSize, hiddenSize, &alpha, T_h_in, hiddenSize, &beta, NULL, 4 * hiddenSize, T_h_out, 4 * hiddenSize)); } } else { T_f = T; } if (transb != CUBLAS_OP_N) { printf("Only transb == CUBLAS_OP_N supported\n"); return -1; } int lStart = 0; int lEnd = 0; int rStart = 0; int rEnd = 0; int recurBatchSize = RECUR_BATCH_SIZE; while (true) { // Many layer "scheduling". if (lEnd == 0) { lStart = 0; lEnd = 1; rStart = 0; } else { // Move "up" and "left" lStart++; lEnd++; rStart -= recurBatchSize; // Over the top or off the left, reset to layer 0 if (lEnd > numLayers || rStart < 0) { rStart += (lStart + 1) * recurBatchSize; lStart = 0; lEnd = 1; } // Off the right, step up while (rStart >= seqLength && lEnd <= numLayers) { lStart++; lEnd++; rStart -= recurBatchSize; } // Over the top or off the left, done! if (lEnd > numLayers || rStart < 0) { break; } } rEnd = rStart + recurBatchSize; if (rEnd > seqLength) rEnd = seqLength; for (int layer = lStart; layer < lEnd; layer++) { cublasErrCheck(cublasSetStream(handle, stream_i[layer])); for (int i = rStart; i < rEnd; i++) { if (layer > 0) { cudaErrCheck(cudaStreamWaitEvent(stream_i[layer], events_h[layer - 1][i], 0)); cudaErrCheck(cudaEventDestroy(events_h[layer - 1][i])); } } // Optimization 1 if (GROUP_GEMM) { cublasErrCheck(cublasSgemm(handle, transa, transb, 4 * hiddenSize, miniBatch * (rEnd - rStart), hiddenSize, &alpha, &T_f[layer * 8 * hiddenSize * hiddenSize], transa == CUBLAS_OP_N ? 4 * hiddenSize : hiddenSize, i_data + rStart * numElements + layer * seqLength * numElements, hiddenSize, &beta, tmp_i + 4 * rStart * numElements, 4 * hiddenSize)); } else { for (int igemm =0; igemm < 4; igemm++) { cublasErrCheck(cublasSgemm(handle, transa, transb, hiddenSize, miniBatch * (rEnd - rStart), hiddenSize, &alpha, &T_f[layer * 8 * hiddenSize * hiddenSize + igemm * hiddenSize], transa == CUBLAS_OP_N ? 4 * hiddenSize : hiddenSize, i_data + rStart * numElements + layer * seqLength * numElements, hiddenSize, &beta, tmp_i + 4 * rStart * numElements + igemm * hiddenSize, 4 * hiddenSize)); } } for (int i = rStart; i < rEnd; i++) { cudaErrCheck(cudaEventCreate(&events_i[layer][i], cudaEventDisableTiming)); cudaErrCheck(cudaEventRecord(events_i[layer][i], stream_i[layer])); } for (int i = rStart; i < rEnd; i++) { cublasErrCheck(cublasSetStream(handle, stream_h[layer])); // Optimization 1 if (GROUP_GEMM) { cublasErrCheck(cublasSgemm(handle, transa, transb, 4 * hiddenSize, miniBatch, hiddenSize, &alpha, &T_f[4 * hiddenSize * hiddenSize + layer * 8 * hiddenSize * hiddenSize], transa == CUBLAS_OP_N ? 
4 * hiddenSize : hiddenSize, h_data + i * numElements + layer * (seqLength + 1) * numElements, hiddenSize, &beta, tmp_h + 4 * layer * numElements, 4 * hiddenSize)); } else { for (int igemm =0; igemm < 4; igemm++) { cublasErrCheck(cublasSgemm(handle, transa, transb, hiddenSize, miniBatch, hiddenSize, &alpha, &T_f[4 * hiddenSize * hiddenSize + layer * 8 * hiddenSize * hiddenSize + igemm * hiddenSize], transa == CUBLAS_OP_N ? 4 * hiddenSize : hiddenSize, h_data + i * numElements + layer * (seqLength + 1) * numElements, hiddenSize, &beta, tmp_h + 4 * layer * numElements + igemm * hiddenSize, 4 * hiddenSize)); } } cudaErrCheck(cudaStreamWaitEvent(stream_h[layer], events_i[layer][i], 0)); cudaErrCheck(cudaEventDestroy(events_i[layer][i])); // Optimization 3 if (FUSE_PW) { dim3 blockDim; dim3 gridDim; blockDim.x = 256; gridDim.x = (numElements + blockDim.x - 1) / blockDim.x; elementWise_fp <<< gridDim, blockDim , 0, stream_h[layer] >>> (hiddenSize, miniBatch, tmp_h + 4 * layer * numElements, tmp_i + 4 * i * numElements, bias + 8 * layer * hiddenSize, TRAINING ? linearGates + 4 * (i * numElements + layer * seqLength * numElements) : NULL, h_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements, i_data + i * numElements + (layer + 1) * seqLength * numElements, c_data + i * numElements + layer * (seqLength + 1) * numElements, c_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements, TRAINING); cudaErrCheck(cudaGetLastError()); } else { LSTM_elementwise_unfused(hiddenSize, miniBatch, tmp_h + 4 * layer * numElements, tmp_i + 4 * i * numElements, bias + 8 * layer * hiddenSize, TRAINING ? linearGates + 4 * (i * numElements + layer * seqLength * numElements) : NULL, h_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements, i_data + i * numElements + (layer + 1) * seqLength * numElements, c_data + i * numElements + layer * (seqLength + 1) * numElements, c_data + (i + 1) * numElements + layer * (seqLength + 1) * numElements, TRAINING, stream_h[layer]); } if (layer != numLayers - 1) { cudaErrCheck(cudaEventCreate(&events_h[layer][i], cudaEventDisableTiming)); cudaErrCheck(cudaEventRecord(events_h[layer][i], stream_h[layer])); } } } } cudaErrCheck(cudaEventRecord(stop)); cudaErrCheck(cudaEventSynchronize(stop)); cudaErrCheck(cudaEventElapsedTime(&elapsedTime, start, stop)); cudaErrCheck(cudaDeviceSynchronize()); // We're done. 
Print some checksums if (checkF) { float* testOutputi; float* testOutputh; float* testOutputc; int numElements = hiddenSize * miniBatch; testOutputi = (float*)malloc(numElements * seqLength * sizeof(float)); testOutputh = (float*)malloc(numElements * numLayers * sizeof(float)); testOutputc = (float*)malloc(numElements * numLayers * sizeof(float)); cudaErrCheck(cudaMemcpy(testOutputi, i_data + numLayers * seqLength * numElements, seqLength * numElements * sizeof(float), cudaMemcpyDeviceToHost)); for (int layer = 0; layer < numLayers; layer++) { cudaErrCheck(cudaMemcpy(testOutputh + layer * numElements, h_data + seqLength * numElements + layer * (seqLength + 1) * numElements, numElements * sizeof(float), cudaMemcpyDeviceToHost)); cudaErrCheck(cudaMemcpy(testOutputc + layer * numElements, c_data + seqLength * numElements + layer * (seqLength + 1) * numElements, numElements * sizeof(float), cudaMemcpyDeviceToHost)); } double checksumi = 0.; double checksumh = 0.; double checksumc = 0.; for (int m = 0; m < miniBatch; m++) { for (int j = 0; j < seqLength; j++) { for (int i = 0; i < hiddenSize; i++) { checksumi += testOutputi[j * numElements + m * hiddenSize + i]; if (hiddenSize <= 8) printf("i: (%d,%d): %E\n", j, i, testOutputi[j * numElements + m * hiddenSize + i]); } } for (int j = 0; j < numLayers; j++) { for (int i = 0; i < hiddenSize; i++) { checksumh += testOutputh[j * numElements + m * hiddenSize + i]; checksumc += testOutputc[j * numElements + m * hiddenSize + i]; } } if (m == 0) printf("i checksum (example %d) %E\n", m, checksumi); if (m == 0) printf("h checksum (example %d) %E\n", m, checksumh); if (m == 0) printf("c checksum (example %d) %E\n", m, checksumc); } printf("i checksum %E ", checksumi); printf("c checksum %E ", checksumc); printf("h checksum %E\n", checksumh); free(testOutputi); free(testOutputc); free(testOutputh); } cudaErrCheck(cudaDeviceSynchronize()); cudaErrCheck(cudaFree(h_data)); cudaErrCheck(cudaFree(i_data)); cudaErrCheck(cudaFree(c_data)); if (T != T_f) cudaErrCheck(cudaFree(T)); cudaErrCheck(cudaFree(T_f)); cudaErrCheck(cudaFree(bias)); cudaErrCheck(cudaFree(tmp_h)); cudaErrCheck(cudaFree(tmp_i)); if (TRAINING) cudaErrCheck(cudaFree(linearGates)); for (int i = 0; i < numLayers; i++) { if (stream_i[i] != NULL) cudaErrCheck(cudaStreamDestroy(stream_i[i])); if (stream_h[i] != NULL) cudaErrCheck(cudaStreamDestroy(stream_h[i])); } free(stream_i); free(stream_h); for (int i = 0; i < numLayers; i++) { free(events_i[i]); free(events_h[i]); } free(events_i); free(events_h); return elapsedTime; } int main(int argc, char* argv[]) { int seqLength; int numLayers; int hiddenSize; int miniBatch; if (argc == 5) { seqLength = atoi(argv[1]); numLayers = atoi(argv[2]); hiddenSize = atoi(argv[3]); miniBatch = atoi(argv[4]); } else if (argc == 1) { printf("Running with default settings\n"); seqLength = 100; numLayers = 4; hiddenSize = 512; miniBatch = 64; } else { printf("Usage: ./LSTM <seqLength> <numLayers> <hiddenSize> <miniBatch>\n"); return 1; } printf("seqLength %d, numLayers %d, hiddenSize %d, miniBatch %d\n", seqLength, numLayers, hiddenSize, miniBatch); int numRuns = 1; float totalTime = 0.f; for (int run = 0; run < numRuns; run++) { totalTime += LSTMTest(hiddenSize, miniBatch, seqLength, numLayers, true); } printf("Runtime %fms\n", totalTime / numRuns); return time < 0; }
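Optimization 1 (GROUP_GEMM) in the file above folds the four per-gate GEMMs into a single call by stacking the gate weight matrices. The following is a sketch of just that call, assuming cuBLAS column-major layout; W, X, gates and groupedGateGemm are illustrative names, not identifiers from the file.

#include <cublas_v2.h>

void groupedGateGemm(cublasHandle_t handle,
                     const float *W,     // (4*hidden) x hidden, four gate matrices stacked, ld = 4*hidden
                     const float *X,     // hidden x batch, ld = hidden
                     float *gates,       // (4*hidden) x batch, ld = 4*hidden
                     int hidden, int batch) {
    const float alpha = 1.f;
    const float beta  = 0.f;
    // One SGEMM instead of four: each output column holds the i, f, g, o pre-activations
    // for one batch element, contiguous in a block of 4*hidden values.
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                4 * hidden, batch, hidden,
                &alpha,
                W, 4 * hidden,
                X, hidden,
                &beta,
                gates, 4 * hidden);
}

That 4 * hidden leading dimension is what lets the fused pointwise kernel above read all four gates of a sample from one contiguous block.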
c60cb0973b8af2a7fcdcdfc90f74ca6c46e52d41.hip
// !!! This is a file automatically generated by hipify!!! #include "moe_cuda_kernel.h" #include <cstdio> #include <iostream> #include <vector> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include "cuda_stream_manager.h" #ifdef MOE_USE_NCCL #include <rccl.h> void moe_cuda_expert_exchange_impl( const long* local_expert_count, long* global_expert_count, int num_expert, int world_size, CudaStreamManager* smgr) { NCCL_SAFE_CALL(ncclGroupStart()); for (int i = 0; i < world_size; ++i) { NCCL_SAFE_CALL(ncclSend( local_expert_count + num_expert * i, num_expert, ncclInt64, i, smgr->ncclcomm, smgr->stream(0))); NCCL_SAFE_CALL(ncclRecv( global_expert_count + num_expert * i, num_expert, ncclInt64, i, smgr->ncclcomm, smgr->stream(0))); } NCCL_SAFE_CALL(ncclGroupEnd()); smgr->sync(1); } std::vector<torch::Tensor> moe_cuda_expert_exchange( torch::Tensor local_expert_count, long num_expert, long n_workers) { auto global_expert_count = torch::empty_like(local_expert_count); auto smgr = getCudaStreamManager(local_expert_count.device().index()); moe_cuda_expert_exchange_impl( local_expert_count.data_ptr<long>(), global_expert_count.data_ptr<long>(), num_expert, n_workers, smgr); return {global_expert_count}; } template<typename scalar_t> void moe_cuda_global_scatter_impl( const scalar_t* local_input_buf, const long* local_expert_count, const long* global_expert_count, scalar_t* input_buf, size_t in_feat, size_t num_expert, size_t world_size, CudaStreamManager* smgr) { // assert world_size > 1 int recv_ptr = 0; /* TODO: may save for backward */ long*expert_ptr = new long[num_expert * world_size]; expert_ptr[0] = 0; for (int i = 1; i < num_expert * world_size; ++i) { expert_ptr[i] = expert_ptr[i - 1] + local_expert_count[i - 1]; } for (int i = 0; i < num_expert; ++i) { NCCL_SAFE_CALL(ncclGroupStart()); for (int j = 0; j < world_size; ++j) { int idx = i + j * num_expert; if (local_expert_count[idx]) { NCCL_SAFE_CALL(ncclSend( local_input_buf + expert_ptr[idx] * in_feat, local_expert_count[idx] * in_feat * sizeof(scalar_t), ncclChar, j, smgr->ncclcomm, smgr->stream(0))); } if (global_expert_count[idx]) { NCCL_SAFE_CALL(ncclRecv( input_buf + recv_ptr * in_feat, global_expert_count[idx] * in_feat * sizeof(scalar_t), ncclChar, j, smgr->ncclcomm, smgr->stream(0))); recv_ptr += global_expert_count[idx]; } } NCCL_SAFE_CALL(ncclGroupEnd()); } delete [] expert_ptr; smgr->sync(1); } std::vector<torch::Tensor> moe_cuda_global_scatter( torch::Tensor input_buf, torch::Tensor local_expert_count, torch::Tensor global_expert_count, long batch_size, long n_workers) { auto num_expert = local_expert_count.size(0) / n_workers; auto in_feat = input_buf.size(1); auto global_input_buf = input_buf.new_empty({batch_size, in_feat}); auto smgr = getCudaStreamManager(input_buf.device().index()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input_buf.scalar_type(), "moe_cuda_global_scatter", ([&] { moe_cuda_global_scatter_impl<scalar_t>( input_buf.data_ptr<scalar_t>(), local_expert_count.data_ptr<long>(), global_expert_count.data_ptr<long>(), global_input_buf.data_ptr<scalar_t>(), in_feat, num_expert, n_workers, smgr ); })); return {global_input_buf,}; } template<typename scalar_t> void moe_cuda_global_gather_impl( const scalar_t* output_buf, const long* local_expert_count, const long* global_expert_count, scalar_t* local_output_buf, size_t out_feat, size_t num_expert, size_t world_size, CudaStreamManager* smgr) { long send_ptr = 0; /* TODO: may save for backward */ long *expert_ptr = new long[num_expert * 
world_size]; expert_ptr[0] = 0; for (int i = 1; i < num_expert * world_size; ++i) { expert_ptr[i] = expert_ptr[i - 1] + local_expert_count[i - 1]; } for (int i = 0; i < num_expert; ++i) { NCCL_SAFE_CALL(ncclGroupStart()); for (int j = 0; j < world_size; ++j) { int idx = i + j * num_expert; if (global_expert_count[idx]) { NCCL_SAFE_CALL(ncclSend( output_buf + send_ptr * out_feat, global_expert_count[idx] * out_feat * sizeof(scalar_t), ncclChar, j, smgr->ncclcomm, smgr->stream(0))); send_ptr += global_expert_count[idx]; } if (local_expert_count[idx]) { NCCL_SAFE_CALL(ncclRecv( local_output_buf + expert_ptr[idx] * out_feat, local_expert_count[idx] * out_feat * sizeof(scalar_t), ncclChar, j, smgr->ncclcomm, smgr->stream(0))); } } NCCL_SAFE_CALL(ncclGroupEnd()); } delete [] expert_ptr; smgr->sync(1); } std::vector<torch::Tensor> moe_cuda_global_gather( torch::Tensor output_buf, torch::Tensor local_expert_count, torch::Tensor global_expert_count, long batch_size, long n_workers) { auto num_expert = local_expert_count.size(0) / n_workers; auto out_feat = output_buf.size(1); auto local_output_buf = output_buf.new_empty({batch_size, out_feat}); auto smgr = getCudaStreamManager(output_buf.device().index()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(output_buf.scalar_type(), "moe_cuda_global_gather", ([&] { moe_cuda_global_gather_impl<scalar_t>( output_buf.data_ptr<scalar_t>(), local_expert_count.data_ptr<long>(), global_expert_count.data_ptr<long>(), local_output_buf.data_ptr<scalar_t>(), out_feat, num_expert, n_workers, smgr ); })); return {local_output_buf,}; } #endif
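Both the scatter and gather paths above turn the per-(expert, worker) token counts into send offsets with an exclusive prefix sum before any communication happens. A host-side sketch of just that step (expertOffsets is an illustrative name):

#include <vector>

std::vector<long> expertOffsets(const long *local_expert_count,
                                int num_expert, int world_size) {
    std::vector<long> expert_ptr(num_expert * world_size);
    expert_ptr[0] = 0;
    for (int i = 1; i < num_expert * world_size; ++i) {
        // offset of bucket i = offset of bucket i-1 + size of bucket i-1
        expert_ptr[i] = expert_ptr[i - 1] + local_expert_count[i - 1];
    }
    return expert_ptr;
}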
c60cb0973b8af2a7fcdcdfc90f74ca6c46e52d41.cu
#include "moe_cuda_kernel.h" #include <cstdio> #include <iostream> #include <vector> #include <cuda.h> #include <cuda_runtime.h> #include <cublas_v2.h> #include "cuda_stream_manager.h" #ifdef MOE_USE_NCCL #include <nccl.h> void moe_cuda_expert_exchange_impl( const long* local_expert_count, long* global_expert_count, int num_expert, int world_size, CudaStreamManager* smgr) { NCCL_SAFE_CALL(ncclGroupStart()); for (int i = 0; i < world_size; ++i) { NCCL_SAFE_CALL(ncclSend( local_expert_count + num_expert * i, num_expert, ncclInt64, i, smgr->ncclcomm, smgr->stream(0))); NCCL_SAFE_CALL(ncclRecv( global_expert_count + num_expert * i, num_expert, ncclInt64, i, smgr->ncclcomm, smgr->stream(0))); } NCCL_SAFE_CALL(ncclGroupEnd()); smgr->sync(1); } std::vector<torch::Tensor> moe_cuda_expert_exchange( torch::Tensor local_expert_count, long num_expert, long n_workers) { auto global_expert_count = torch::empty_like(local_expert_count); auto smgr = getCudaStreamManager(local_expert_count.device().index()); moe_cuda_expert_exchange_impl( local_expert_count.data_ptr<long>(), global_expert_count.data_ptr<long>(), num_expert, n_workers, smgr); return {global_expert_count}; } template<typename scalar_t> void moe_cuda_global_scatter_impl( const scalar_t* local_input_buf, const long* local_expert_count, const long* global_expert_count, scalar_t* input_buf, size_t in_feat, size_t num_expert, size_t world_size, CudaStreamManager* smgr) { // assert world_size > 1 int recv_ptr = 0; /* TODO: may save for backward */ long*expert_ptr = new long[num_expert * world_size]; expert_ptr[0] = 0; for (int i = 1; i < num_expert * world_size; ++i) { expert_ptr[i] = expert_ptr[i - 1] + local_expert_count[i - 1]; } for (int i = 0; i < num_expert; ++i) { NCCL_SAFE_CALL(ncclGroupStart()); for (int j = 0; j < world_size; ++j) { int idx = i + j * num_expert; if (local_expert_count[idx]) { NCCL_SAFE_CALL(ncclSend( local_input_buf + expert_ptr[idx] * in_feat, local_expert_count[idx] * in_feat * sizeof(scalar_t), ncclChar, j, smgr->ncclcomm, smgr->stream(0))); } if (global_expert_count[idx]) { NCCL_SAFE_CALL(ncclRecv( input_buf + recv_ptr * in_feat, global_expert_count[idx] * in_feat * sizeof(scalar_t), ncclChar, j, smgr->ncclcomm, smgr->stream(0))); recv_ptr += global_expert_count[idx]; } } NCCL_SAFE_CALL(ncclGroupEnd()); } delete [] expert_ptr; smgr->sync(1); } std::vector<torch::Tensor> moe_cuda_global_scatter( torch::Tensor input_buf, torch::Tensor local_expert_count, torch::Tensor global_expert_count, long batch_size, long n_workers) { auto num_expert = local_expert_count.size(0) / n_workers; auto in_feat = input_buf.size(1); auto global_input_buf = input_buf.new_empty({batch_size, in_feat}); auto smgr = getCudaStreamManager(input_buf.device().index()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input_buf.scalar_type(), "moe_cuda_global_scatter", ([&] { moe_cuda_global_scatter_impl<scalar_t>( input_buf.data_ptr<scalar_t>(), local_expert_count.data_ptr<long>(), global_expert_count.data_ptr<long>(), global_input_buf.data_ptr<scalar_t>(), in_feat, num_expert, n_workers, smgr ); })); return {global_input_buf,}; } template<typename scalar_t> void moe_cuda_global_gather_impl( const scalar_t* output_buf, const long* local_expert_count, const long* global_expert_count, scalar_t* local_output_buf, size_t out_feat, size_t num_expert, size_t world_size, CudaStreamManager* smgr) { long send_ptr = 0; /* TODO: may save for backward */ long *expert_ptr = new long[num_expert * world_size]; expert_ptr[0] = 0; for (int i = 1; i < num_expert * world_size; 
++i) { expert_ptr[i] = expert_ptr[i - 1] + local_expert_count[i - 1]; } for (int i = 0; i < num_expert; ++i) { NCCL_SAFE_CALL(ncclGroupStart()); for (int j = 0; j < world_size; ++j) { int idx = i + j * num_expert; if (global_expert_count[idx]) { NCCL_SAFE_CALL(ncclSend( output_buf + send_ptr * out_feat, global_expert_count[idx] * out_feat * sizeof(scalar_t), ncclChar, j, smgr->ncclcomm, smgr->stream(0))); send_ptr += global_expert_count[idx]; } if (local_expert_count[idx]) { NCCL_SAFE_CALL(ncclRecv( local_output_buf + expert_ptr[idx] * out_feat, local_expert_count[idx] * out_feat * sizeof(scalar_t), ncclChar, j, smgr->ncclcomm, smgr->stream(0))); } } NCCL_SAFE_CALL(ncclGroupEnd()); } delete [] expert_ptr; smgr->sync(1); } std::vector<torch::Tensor> moe_cuda_global_gather( torch::Tensor output_buf, torch::Tensor local_expert_count, torch::Tensor global_expert_count, long batch_size, long n_workers) { auto num_expert = local_expert_count.size(0) / n_workers; auto out_feat = output_buf.size(1); auto local_output_buf = output_buf.new_empty({batch_size, out_feat}); auto smgr = getCudaStreamManager(output_buf.device().index()); AT_DISPATCH_FLOATING_TYPES_AND_HALF(output_buf.scalar_type(), "moe_cuda_global_gather", ([&] { moe_cuda_global_gather_impl<scalar_t>( output_buf.data_ptr<scalar_t>(), local_expert_count.data_ptr<long>(), global_expert_count.data_ptr<long>(), local_output_buf.data_ptr<scalar_t>(), out_feat, num_expert, n_workers, smgr ); })); return {local_output_buf,}; } #endif
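The exchange itself relies on posting every ncclSend and ncclRecv for a round inside one ncclGroupStart/ncclGroupEnd pair, so the point-to-point calls cannot deadlock on ordering. A stripped-down sketch of that pattern, mirroring moe_cuda_expert_exchange_impl above; NCCL_SAFE_CALL error handling is omitted and exchangeCounts is an illustrative name.

#include <nccl.h>

ncclResult_t exchangeCounts(const long *send_counts, long *recv_counts,
                            int num_expert, int world_size,
                            ncclComm_t comm, cudaStream_t stream) {
    ncclGroupStart();
    for (int peer = 0; peer < world_size; ++peer) {
        // One send and one matching receive per peer, all queued inside the group.
        ncclSend(send_counts + num_expert * peer, num_expert, ncclInt64, peer, comm, stream);
        ncclRecv(recv_counts + num_expert * peer, num_expert, ncclInt64, peer, comm, stream);
    }
    return ncclGroupEnd();   // communication is issued here, on the given stream
}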
d1ec17ba73f4b61b4f88a50384b81c89d26cc5c8.hip
// !!! This is a file automatically generated by hipify!!! /* Babak Poursartip 02/27/2021 CUDA topic: pinned memory - Instead of using malloc or new to allocate memory on the CPU (host), we use hipHostMalloc(). This allocates pinned memory on the host. - To free the memory, we use hipHostFree instead of delete[]. - The disadvantage is that pinned memory cannot be swapped to disk, so enough physical memory must be available. */ #include <cstdio> #include <iostream> // ============================== float timing(bool pinned, bool toDevice) { std::cout << " p: " << pinned << " t: " << toDevice << std::endl; const int count = 1 << 20; const int iterations = 1 << 6; const int size = count * sizeof(int); hipEvent_t start, end; int *d, *h; float elapsed; hipError_t status; hipEventCreate(&start); hipEventCreate(&end); hipMalloc(&d, size); if(pinned) hipHostMalloc(&h, size, hipHostMallocDefault); else h = new int[count]; hipEventRecord(start); for (int i = 0; i < iterations; ++i) { if (toDevice) status = hipMemcpy(d, h, size, hipMemcpyHostToDevice); else status = hipMemcpy(h, d, size, hipMemcpyDeviceToHost); } (void)status; // the return status is not checked in this benchmark hipEventRecord(end); hipEventSynchronize(end); hipEventElapsedTime(&elapsed, start, end); if(pinned) hipHostFree(h); else delete[] h; hipFree(d); hipEventDestroy(start); hipEventDestroy(end); return elapsed; } // ============================== int main() { printf(" starts \n"); std::cout << "From device - paged memory: \t" << timing(false, false) << std::endl; std::cout << "To device - paged memory: \t" << timing(false, true) << std::endl; std::cout << "From device - pinned memory: \t" << timing(true, false) << std::endl; std::cout << "To device - pinned memory: \t" << timing(true, true) << std::endl; printf(" done \n"); return 0; }
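The benchmark above times each batch of copies with HIP events. The same pattern in plain CUDA, reduced to a single copy; timeCopyMs is an illustrative helper, not part of the file above.

#include <cuda_runtime.h>

float timeCopyMs(void *dst, const void *src, size_t bytes, cudaMemcpyKind kind) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    cudaMemcpy(dst, src, bytes, kind);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);   // wait until the copy and the stop event have completed

    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}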
d1ec17ba73f4b61b4f88a50384b81c89d26cc5c8.cu
/* Babak Poursartip 02/27/2021 CUDA topic: pinned memory - Instead of using malloc or new to allocate memory on the CPU (host), we use cudaHostAlloc(). This allocates pinned memory on the host. - To free the memory, we use cudaFreeHost instead of delete[]. - The disadvantage is that pinned memory cannot be swapped to disk, so enough physical memory must be available. */ #include <cstdio> #include <iostream> // ============================== float timing(bool pinned, bool toDevice) { std::cout << " p: " << pinned << " t: " << toDevice << std::endl; const int count = 1 << 20; const int iterations = 1 << 6; const int size = count * sizeof(int); cudaEvent_t start, end; int *d, *h; float elapsed; cudaError_t status; cudaEventCreate(&start); cudaEventCreate(&end); cudaMalloc(&d, size); if(pinned) cudaHostAlloc(&h, size, cudaHostAllocDefault); else h = new int[count]; cudaEventRecord(start); for (int i = 0; i < iterations; ++i) { if (toDevice) status = cudaMemcpy(d, h, size, cudaMemcpyHostToDevice); else status = cudaMemcpy(h, d, size, cudaMemcpyDeviceToHost); } (void)status; // the return status is not checked in this benchmark cudaEventRecord(end); cudaEventSynchronize(end); cudaEventElapsedTime(&elapsed, start, end); if(pinned) cudaFreeHost(h); else delete[] h; cudaFree(d); cudaEventDestroy(start); cudaEventDestroy(end); return elapsed; } // ============================== int main() { printf(" starts \n"); std::cout << "From device - paged memory: \t" << timing(false, false) << std::endl; std::cout << "To device - paged memory: \t" << timing(false, true) << std::endl; std::cout << "From device - pinned memory: \t" << timing(true, false) << std::endl; std::cout << "To device - pinned memory: \t" << timing(true, true) << std::endl; printf(" done \n"); return 0; }
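One thing the synchronous benchmark above does not show is the main practical benefit of pinned memory: cudaMemcpyAsync can only overlap with host work and other streams when the host buffer is page-locked. A minimal sketch under that assumption, with illustrative names and no error checking.

#include <cuda_runtime.h>

void asyncCopyWithPinnedHost(size_t count) {
    int *h = nullptr, *d = nullptr;
    cudaHostAlloc((void **)&h, count * sizeof(int), cudaHostAllocDefault);  // pinned host buffer
    cudaMalloc((void **)&d, count * sizeof(int));

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    // With a pinned source buffer this call returns immediately; the copy proceeds in the
    // background and overlaps with any host work done before the synchronize.
    cudaMemcpyAsync(d, h, count * sizeof(int), cudaMemcpyHostToDevice, stream);
    cudaStreamSynchronize(stream);

    cudaStreamDestroy(stream);
    cudaFree(d);
    cudaFreeHost(h);
}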
0a56f6a1d3f9891be39b8f3e06d46b7bbf9f3224.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil.h> // includes, kernels #include <scan_largearray_kernel.cu> #define DEFAULT_NUM_ELEMENTS 16777216 #define MAX_RAND 3 //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int ReadFile(float*, char* file_name, int size); void WriteFile(float*, char* file_name, int size); extern "C" unsigned int compare( const float* reference, const float* data, const unsigned int len); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { int errorM = 0; float device_time; float host_time; int* size = NULL; //(int*)malloc(1 * sizeof(int)); unsigned int data2read = 1; int num_elements = 0; // Must support large, non-power-of-2 arrays // allocate host memory to store the input data unsigned int mem_size = sizeof( float) * num_elements; float* h_data = (float*) malloc( mem_size); // * No arguments: Randomly generate input data and compare against the // host's result. 
// * One argument: Randomly generate input data and write the result to // file name specified by first argument // * Two arguments: Read the first argument which indicate the size of the array, // randomly generate input data and write the input data // to the second argument. (for generating random input data) // * Three arguments: Read the first file which indicate the size of the array, // then input data from the file name specified by 2nd argument and write the // SCAN output to file name specified by the 3rd argument. switch(argc-1) { case 2: // Determine size of array cutReadFilei(argv[1], &size, &data2read, true); if(data2read != 1){ printf("Error reading parameter file\n"); exit(1); } num_elements = size[0]; // allocate host memory to store the input data mem_size = sizeof( float) * num_elements; h_data = (float*) malloc( mem_size); for( unsigned int i = 0; i < num_elements; ++i) { h_data[i] = (int)(rand() % MAX_RAND); } WriteFile(h_data, argv[2], num_elements); break; case 3: // Three Arguments cutReadFilei(argv[1], &size, &data2read, true); if(data2read != 1){ printf("Error reading parameter file\n"); exit(1); } num_elements = size[0]; // allocate host memory to store the input data mem_size = sizeof( float) * num_elements; h_data = (float*) malloc( mem_size); errorM = ReadFile(h_data, argv[2], size[0]); if(errorM != 1) { printf("Error reading input file!\n"); exit(1); } break; default: // No Arguments or one argument // initialize the input data on the host to be integer values // between 0 and 1000 // Use DEFAULT_NUM_ELEMENTS num_elements num_elements = DEFAULT_NUM_ELEMENTS; // allocate host memory to store the input data mem_size = sizeof( float) * num_elements; h_data = (float*) malloc( mem_size); // initialize the input data on the host for( unsigned int i = 0; i < num_elements; ++i) { // h_data[i] = 1.0f; h_data[i] = (int)(rand() % MAX_RAND); } break; } unsigned int timer; CUT_SAFE_CALL(cutCreateTimer(&timer)); // compute reference solution float* reference = (float*) malloc( mem_size); cutStartTimer(timer); computeGold( reference, h_data, num_elements); cutStopTimer(timer); printf("\n\n**===-------------------------------------------------===**\n"); printf("Processing %d elements...\n", num_elements); printf("Host CPU Processing time: %f (ms)\n", cutGetTimerValue(timer)); host_time = cutGetTimerValue(timer); CUT_SAFE_CALL(cutDeleteTimer(timer)); // **===-------- Lab4: Allocate data structure here -----------===** // allocate device memory input and output arrays float* d_idata = NULL; float* d_odata = NULL; CUDA_SAFE_CALL( hipMalloc( (void**) &d_idata, mem_size)); CUDA_SAFE_CALL( hipMalloc( (void**) &d_odata, mem_size)); // copy host memory to device input array CUDA_SAFE_CALL( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) ); // initialize all the other device arrays to be safe CUDA_SAFE_CALL( hipMemcpy( d_odata, h_data, mem_size, hipMemcpyHostToDevice) ); // **===-----------------------------------------------------------===** // Run just once to remove startup overhead for more accurate performance // measurement prescanArray(d_odata, d_idata, 16); // Run the prescan CUT_SAFE_CALL(cutCreateTimer(&timer)); cutStartTimer(timer); // **===-------- Lab4: Modify the body of this function -----------===** preOperation(num_elements); prescanArray(d_odata, d_idata, num_elements); prevOperation(); // **===-----------------------------------------------------------===** CUDA_SAFE_CALL( hipDeviceSynchronize() ); cutStopTimer(timer); printf("CUDA Processing time: %f 
(ms)\n", cutGetTimerValue(timer)); device_time = cutGetTimerValue(timer); printf("Speedup: %fX\n", host_time/device_time); // **===-------- Lab4: Deallocate data structure here -----------===** // prevOperation(); // **===-----------------------------------------------------------===** // copy result from device to host CUDA_SAFE_CALL(hipMemcpy( h_data, d_odata, sizeof(float) * num_elements, hipMemcpyDeviceToHost)); if ((argc - 1) == 3) // Three Arguments, write result to file { WriteFile(h_data, argv[3], num_elements); } else if ((argc - 1) == 1) // One Argument, write result to file { WriteFile(h_data, argv[1], num_elements); } // Check if the result is equivalent to the expected soluion unsigned int result_regtest = cutComparef( reference, h_data, num_elements); printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED"); // cleanup memory cutDeleteTimer(timer); free( h_data); free( reference); hipFree( d_odata); hipFree( d_idata); } int ReadFile(float* M, char* file_name, int size) { unsigned int elements_read = size; if (cutReadFilef(file_name, &M, &elements_read, true)) return 1; else return 0; } void WriteFile(float* M, char* file_name, int size) { cutWriteFilef(file_name, M, size, 0.0001f); }
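The harness above validates the device scan against a host computeGold result. The body of computeGold is not shown here; assuming the scan is an exclusive prefix sum (the usual convention for prescanArray-style code), the reference computation would look like this sketch:

void exclusiveScanReference(float *reference, const float *idata, unsigned int len) {
    if (len == 0) return;
    reference[0] = 0.f;                       // exclusive scan: first output is the identity
    for (unsigned int i = 1; i < len; ++i) {
        reference[i] = reference[i - 1] + idata[i - 1];
    }
}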
0a56f6a1d3f9891be39b8f3e06d46b7bbf9f3224.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifdef _WIN32 # define NOMINMAX #endif #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil.h> // includes, kernels #include <scan_largearray_kernel.cu> #define DEFAULT_NUM_ELEMENTS 16777216 #define MAX_RAND 3 //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int ReadFile(float*, char* file_name, int size); void WriteFile(float*, char* file_name, int size); extern "C" unsigned int compare( const float* reference, const float* data, const unsigned int len); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { int errorM = 0; float device_time; float host_time; int* size = NULL; //(int*)malloc(1 * sizeof(int)); unsigned int data2read = 1; int num_elements = 0; // Must support large, non-power-of-2 arrays // allocate host memory to store the input data unsigned int mem_size = sizeof( float) * num_elements; float* h_data = (float*) malloc( mem_size); // * No arguments: Randomly generate input data and compare against the // host's result. 
// * One argument: Randomly generate input data and write the result to // file name specified by first argument // * Two arguments: Read the first argument which indicate the size of the array, // randomly generate input data and write the input data // to the second argument. (for generating random input data) // * Three arguments: Read the first file which indicate the size of the array, // then input data from the file name specified by 2nd argument and write the // SCAN output to file name specified by the 3rd argument. switch(argc-1) { case 2: // Determine size of array cutReadFilei(argv[1], &size, &data2read, true); if(data2read != 1){ printf("Error reading parameter file\n"); exit(1); } num_elements = size[0]; // allocate host memory to store the input data mem_size = sizeof( float) * num_elements; h_data = (float*) malloc( mem_size); for( unsigned int i = 0; i < num_elements; ++i) { h_data[i] = (int)(rand() % MAX_RAND); } WriteFile(h_data, argv[2], num_elements); break; case 3: // Three Arguments cutReadFilei(argv[1], &size, &data2read, true); if(data2read != 1){ printf("Error reading parameter file\n"); exit(1); } num_elements = size[0]; // allocate host memory to store the input data mem_size = sizeof( float) * num_elements; h_data = (float*) malloc( mem_size); errorM = ReadFile(h_data, argv[2], size[0]); if(errorM != 1) { printf("Error reading input file!\n"); exit(1); } break; default: // No Arguments or one argument // initialize the input data on the host to be integer values // between 0 and 1000 // Use DEFAULT_NUM_ELEMENTS num_elements num_elements = DEFAULT_NUM_ELEMENTS; // allocate host memory to store the input data mem_size = sizeof( float) * num_elements; h_data = (float*) malloc( mem_size); // initialize the input data on the host for( unsigned int i = 0; i < num_elements; ++i) { // h_data[i] = 1.0f; h_data[i] = (int)(rand() % MAX_RAND); } break; } unsigned int timer; CUT_SAFE_CALL(cutCreateTimer(&timer)); // compute reference solution float* reference = (float*) malloc( mem_size); cutStartTimer(timer); computeGold( reference, h_data, num_elements); cutStopTimer(timer); printf("\n\n**===-------------------------------------------------===**\n"); printf("Processing %d elements...\n", num_elements); printf("Host CPU Processing time: %f (ms)\n", cutGetTimerValue(timer)); host_time = cutGetTimerValue(timer); CUT_SAFE_CALL(cutDeleteTimer(timer)); // **===-------- Lab4: Allocate data structure here -----------===** // allocate device memory input and output arrays float* d_idata = NULL; float* d_odata = NULL; CUDA_SAFE_CALL( cudaMalloc( (void**) &d_idata, mem_size)); CUDA_SAFE_CALL( cudaMalloc( (void**) &d_odata, mem_size)); // copy host memory to device input array CUDA_SAFE_CALL( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) ); // initialize all the other device arrays to be safe CUDA_SAFE_CALL( cudaMemcpy( d_odata, h_data, mem_size, cudaMemcpyHostToDevice) ); // **===-----------------------------------------------------------===** // Run just once to remove startup overhead for more accurate performance // measurement prescanArray(d_odata, d_idata, 16); // Run the prescan CUT_SAFE_CALL(cutCreateTimer(&timer)); cutStartTimer(timer); // **===-------- Lab4: Modify the body of this function -----------===** preOperation(num_elements); prescanArray(d_odata, d_idata, num_elements); prevOperation(); // **===-----------------------------------------------------------===** CUDA_SAFE_CALL( cudaThreadSynchronize() ); cutStopTimer(timer); printf("CUDA Processing 
time: %f (ms)\n", cutGetTimerValue(timer)); device_time = cutGetTimerValue(timer); printf("Speedup: %fX\n", host_time/device_time); // **===-------- Lab4: Deallocate data structure here -----------===** // prevOperation(); // **===-----------------------------------------------------------===** // copy result from device to host CUDA_SAFE_CALL(cudaMemcpy( h_data, d_odata, sizeof(float) * num_elements, cudaMemcpyDeviceToHost)); if ((argc - 1) == 3) // Three Arguments, write result to file { WriteFile(h_data, argv[3], num_elements); } else if ((argc - 1) == 1) // One Argument, write result to file { WriteFile(h_data, argv[1], num_elements); } // Check if the result is equivalent to the expected soluion unsigned int result_regtest = cutComparef( reference, h_data, num_elements); printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED"); // cleanup memory cutDeleteTimer(timer); free( h_data); free( reference); cudaFree( d_odata); cudaFree( d_idata); } int ReadFile(float* M, char* file_name, int size) { unsigned int elements_read = size; if (cutReadFilef(file_name, &M, &elements_read, true)) return 1; else return 0; } void WriteFile(float* M, char* file_name, int size) { cutWriteFilef(file_name, M, size, 0.0001f); }
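cutComparef comes from the long-deprecated cutil helpers bundled with old SDK samples. As an assumption about what the PASSED/FAILED check amounts to, a tolerance comparison of this shape is sufficient (this is a sketch, not cutil's actual implementation):

#include <cmath>

unsigned int compareWithEpsilon(const float *reference, const float *data,
                                unsigned int len, float epsilon) {
    for (unsigned int i = 0; i < len; ++i) {
        if (std::fabs(reference[i] - data[i]) > epsilon) {
            return 0;   // mismatch -> test FAILED
        }
    }
    return 1;           // all elements within tolerance -> test PASSED
}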
2d422513a4892b1deb197be7ded5d5f7f79506d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> __global__ void reverse_each_word(char *A, char *B, int len) { int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < len) B[id] = A[len - id - 1]; } int main() { int size; const char *A = "GNINNIGEB WEN YM SI SIHT"; int len = strlen(A); char *B = (char *)malloc(sizeof(char) * (len + 1)); char *d_a, *d_b; size = len * sizeof(char); hipMalloc((void **)&d_a, size); hipMalloc((void **)&d_b, size); hipMemcpy(d_a, A, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( reverse_each_word), dim3(ceil(len/256.0)), dim3(256), 0, 0, d_a, d_b, len); hipMemcpy(B, d_b, size, hipMemcpyDeviceToHost); B[len] = '\0'; /* null-terminate before printing with %s */ printf("Original String : \n%s\n",A); printf("Desired Output :\n%s\n",B); hipFree(d_a); hipFree(d_b); free(B); return 0; }
2d422513a4892b1deb197be7ded5d5f7f79506d5.cu
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Mirrors the whole buffer: thread `id` receives the character that sits
// `id` positions from the end of the input string.
__global__ void reverse_each_word(char *A, char *B, int len)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < len)
        B[id] = A[len - id - 1];
}

int main()
{
    const char *A = "GNINNIGEB WEN YM SI SIHT";
    int len = strlen(A);
    char *B = (char *)malloc(sizeof(char) * (len + 1)); // +1 for the terminator
    char *d_a, *d_b;
    int size = len * sizeof(char);

    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMemcpy(d_a, A, size, cudaMemcpyHostToDevice);

    reverse_each_word<<<ceil(len / 256.0), 256>>>(d_a, d_b, len);

    cudaMemcpy(B, d_b, size, cudaMemcpyDeviceToHost);
    B[len] = '\0'; // the kernel fills only len bytes, so terminate before printing

    printf("Original String : \n%s\n", A);
    printf("Desired Output :\n%s\n", B);

    cudaFree(d_a);
    cudaFree(d_b);
    free(B);
    return 0;
}
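Since the kernel simply mirrors the buffer (B[id] = A[len - id - 1]), its output is easy to validate on the host. The helper below is a hypothetical addition for illustration, not part of the original program; it compares the device result against a CPU reversal.

// Hypothetical host-side check: true iff `reversed` is `original` read backwards.
static bool check_reversed(const char* original, const char* reversed, int len)
{
    for (int i = 0; i < len; ++i) {
        if (reversed[i] != original[len - 1 - i])
            return false;
    }
    return true;
}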
f0e03c4741b21f92098f9562330b10e10cfe302a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "distconv/dnn_backend/mean_squared_error.hpp" #include "distconv/runtime_gpu.hpp" #include "distconv/tensor/algorithms_cuda.hpp" #include "distconv/tensor/tensor_mpi.hpp" #include "distconv/util/util_gpu.hpp" #include "distconv/util/util_mpi.hpp" #include <limits> #if H2_HAS_CUDA #include <hipcub/hipcub.hpp> namespace cubns = cub; #elif H2_HAS_ROCM #include <hipcub/block/block_reduce.hpp> namespace cubns = hipcub; #endif using distconv::tensor::HIPAllocator; using distconv::tensor::LocaleMPI; template <typename DataType> using TensorCUDA = distconv::tensor::Tensor<DataType, LocaleMPI, HIPAllocator>; namespace distconv { namespace { /* - gridDim.y == number of samples - Each sample is taken care by gridDim.x blocks */ template <typename DataType, int BLOCK_SIZE> __global__ void fp_local(const DataType* __restrict__ prediction, const DataType* __restrict__ ground_truth, DataType* __restrict__ y, const index_t sample_size, const index_t sample_spatial_size, const index_t sample_channel_size, int thread_work_size) { const int tid = threadIdx.x; const int sample_idx = blockIdx.y; prediction += sample_idx * sample_size; ground_truth += sample_idx * sample_size; index_t offset = tid + blockIdx.x * BLOCK_SIZE; const int offset_stride = BLOCK_SIZE * gridDim.x; const index_t offset_limit = min(sample_size, offset + offset_stride * thread_work_size); auto psum = DataType(0.); for (; offset < offset_limit; offset += offset_stride) { const DataType x = prediction[offset]; const DataType xhat = ground_truth[offset]; const DataType err = x - xhat; psum += err * err; } using BlockReduce = cubns::BlockReduce<DataType, BLOCK_SIZE>; __shared__ typename BlockReduce::TempStorage temp_storage; psum = BlockReduce(temp_storage).Sum(psum) / sample_size; if (tid == 0) { atomic_add(&y[sample_idx], psum); } } /* - gridDim.y == number of samples - Each sample is taken care by gridDim.x blocks */ template <typename DataType, int BLOCK_SIZE> __global__ void bp_local(const DataType* __restrict__ x_pred, const DataType* __restrict__ x_truth, const DataType* __restrict__ dy, DataType* __restrict__ dx_pred, DataType* __restrict__ dx_truth, const index_t sample_size, const index_t sample_spatial_size, const index_t sample_channel_size, int thread_work_size) { const int tid = threadIdx.x; const int sample_idx = blockIdx.y; x_pred += sample_idx * sample_size; dx_pred += sample_idx * sample_size; x_truth += sample_idx * sample_size; dx_truth += sample_idx * sample_size; index_t offset = tid + blockIdx.x * BLOCK_SIZE; const int offset_stride = BLOCK_SIZE * gridDim.x; const index_t offset_limit = min(sample_size, offset + offset_stride * thread_work_size); const auto dy_sample = dy[sample_idx]; const DataType scale = static_cast<DataType>(DataType(2) / sample_size); for (; offset < offset_limit; offset += offset_stride) { const DataType x = x_pred[offset]; const DataType xhat = x_truth[offset]; const DataType err = x - xhat; dx_pred[offset] = scale * err * dy_sample; dx_truth[offset] = -scale * err * dy_sample; } } } // namespace template <typename Tensor> int MeanSquaredError<BackendDNNLib>::forward(const Tensor& x_pred, const Tensor& x_truth, Tensor& y) { using DataType = typename Tensor::data_type; util::MPIPrintStreamDebug() << "Mean squared error FP: " << x_pred << ", " << x_truth << ", " << y; constexpr int block_size = 256; constexpr int thread_work_size = 8; // Assumes no halo for simplicity assert_eq(x_pred.get_local_size(), 
x_pred.get_local_real_size()); assert_eq(x_truth.get_local_size(), x_truth.get_local_real_size()); const auto num_samples = x_pred.get_local_shape()[-1]; if (num_samples == 0) return 0; y.zero(m_stream); if (x_pred.get_local_size() > 0) { auto sample_size = x_pred.get_local_size() / num_samples; auto num_blocks_per_sample = util::ceil(sample_size, (index_t) block_size * thread_work_size); dim3 bdim(block_size); dim3 gdim(num_blocks_per_sample, num_samples); const auto sample_channel_size = x_pred.get_local_shape()[x_pred.get_num_spatial_dims()]; const auto sample_spatial_size = sample_size / sample_channel_size; assert_eq(sample_channel_size * sample_spatial_size, sample_size); hipLaunchKernelGGL(( fp_local<DataType, block_size>) , dim3(gdim), dim3(bdim), 0, m_stream, x_pred.get_const_buffer(), x_truth.get_const_buffer(), y.get_buffer(), sample_size, sample_spatial_size, sample_channel_size, thread_work_size); } if (m_num_procs_per_sample > 1) { Al::Allreduce<Al::NCCLBackend, DataType>(y.get_buffer(), num_samples, Al::ReductionOperator::sum, *m_al.get()); } return 0; } template <typename Tensor> int MeanSquaredError<BackendDNNLib>::backward(const Tensor& x_pred, const Tensor& x_truth, Tensor& dy, Tensor& dx_pred, Tensor& dx_truth) { using DataType = typename Tensor::data_type; util::MPIPrintStreamDebug() << "Mean squared error BP: " << dy << ", " << dx_pred << ", " << dx_truth; if (m_num_procs_per_sample > 1) { const auto num_samples = x_pred.get_local_shape()[-1]; Al::Bcast<Al::NCCLBackend, DataType>( dy.get_buffer(), num_samples, 0, *m_al.get()); } constexpr int block_size = 256; constexpr int thread_work_size = 8; // Assumes no halo for simplicity assert_eq(dx_pred.get_local_size(), dx_pred.get_local_real_size()); assert_eq(dx_truth.get_local_size(), dx_truth.get_local_real_size()); if (x_pred.get_local_size() == 0) return 0; auto num_samples = x_pred.get_local_shape()[-1]; auto sample_size = x_pred.get_local_size() / num_samples; auto num_blocks_per_sample = util::ceil(sample_size, (index_t) block_size * thread_work_size); dim3 bdim(block_size); dim3 gdim(num_blocks_per_sample, num_samples); const auto sample_channel_size = x_pred.get_local_shape()[x_pred.get_num_spatial_dims()]; const auto sample_spatial_size = sample_size / sample_channel_size; assert_eq(sample_channel_size * sample_spatial_size, sample_size); hipLaunchKernelGGL(( bp_local<DataType, block_size>) , dim3(gdim), dim3(bdim), 0, m_stream, x_pred.get_const_buffer(), x_truth.get_const_buffer(), dy.get_const_buffer(), dx_pred.get_buffer(), dx_truth.get_buffer(), sample_size, sample_spatial_size, sample_channel_size, thread_work_size); return 0; } #define PROTO(T) \ template int \ MeanSquaredError<BackendDNNLib>::forward<TensorCUDA<T>>( \ const TensorCUDA<T>& x_pred, \ const TensorCUDA<T>& x_truth, \ TensorCUDA<T>& y); \ template int \ MeanSquaredError<BackendDNNLib>::backward<TensorCUDA<T>>( \ const TensorCUDA<T>& x_pred, \ const TensorCUDA<T>& x_truth, \ TensorCUDA<T>& dy, \ TensorCUDA<T>& dx_pred, \ TensorCUDA<T>& dx_truth); PROTO(float) PROTO(double) #undef PROTO } // namespace distconv
f0e03c4741b21f92098f9562330b10e10cfe302a.cu
#include "distconv/dnn_backend/mean_squared_error.hpp" #include "distconv/runtime_gpu.hpp" #include "distconv/tensor/algorithms_cuda.hpp" #include "distconv/tensor/tensor_mpi.hpp" #include "distconv/util/util_gpu.hpp" #include "distconv/util/util_mpi.hpp" #include <limits> #if H2_HAS_CUDA #include <cub/block/block_reduce.cuh> namespace cubns = cub; #elif H2_HAS_ROCM #include <hipcub/block/block_reduce.hpp> namespace cubns = hipcub; #endif using distconv::tensor::CUDAAllocator; using distconv::tensor::LocaleMPI; template <typename DataType> using TensorCUDA = distconv::tensor::Tensor<DataType, LocaleMPI, CUDAAllocator>; namespace distconv { namespace { /* - gridDim.y == number of samples - Each sample is taken care by gridDim.x blocks */ template <typename DataType, int BLOCK_SIZE> __global__ void fp_local(const DataType* __restrict__ prediction, const DataType* __restrict__ ground_truth, DataType* __restrict__ y, const index_t sample_size, const index_t sample_spatial_size, const index_t sample_channel_size, int thread_work_size) { const int tid = threadIdx.x; const int sample_idx = blockIdx.y; prediction += sample_idx * sample_size; ground_truth += sample_idx * sample_size; index_t offset = tid + blockIdx.x * BLOCK_SIZE; const int offset_stride = BLOCK_SIZE * gridDim.x; const index_t offset_limit = min(sample_size, offset + offset_stride * thread_work_size); auto psum = DataType(0.); for (; offset < offset_limit; offset += offset_stride) { const DataType x = prediction[offset]; const DataType xhat = ground_truth[offset]; const DataType err = x - xhat; psum += err * err; } using BlockReduce = cubns::BlockReduce<DataType, BLOCK_SIZE>; __shared__ typename BlockReduce::TempStorage temp_storage; psum = BlockReduce(temp_storage).Sum(psum) / sample_size; if (tid == 0) { atomic_add(&y[sample_idx], psum); } } /* - gridDim.y == number of samples - Each sample is taken care by gridDim.x blocks */ template <typename DataType, int BLOCK_SIZE> __global__ void bp_local(const DataType* __restrict__ x_pred, const DataType* __restrict__ x_truth, const DataType* __restrict__ dy, DataType* __restrict__ dx_pred, DataType* __restrict__ dx_truth, const index_t sample_size, const index_t sample_spatial_size, const index_t sample_channel_size, int thread_work_size) { const int tid = threadIdx.x; const int sample_idx = blockIdx.y; x_pred += sample_idx * sample_size; dx_pred += sample_idx * sample_size; x_truth += sample_idx * sample_size; dx_truth += sample_idx * sample_size; index_t offset = tid + blockIdx.x * BLOCK_SIZE; const int offset_stride = BLOCK_SIZE * gridDim.x; const index_t offset_limit = min(sample_size, offset + offset_stride * thread_work_size); const auto dy_sample = dy[sample_idx]; const DataType scale = static_cast<DataType>(DataType(2) / sample_size); for (; offset < offset_limit; offset += offset_stride) { const DataType x = x_pred[offset]; const DataType xhat = x_truth[offset]; const DataType err = x - xhat; dx_pred[offset] = scale * err * dy_sample; dx_truth[offset] = -scale * err * dy_sample; } } } // namespace template <typename Tensor> int MeanSquaredError<BackendDNNLib>::forward(const Tensor& x_pred, const Tensor& x_truth, Tensor& y) { using DataType = typename Tensor::data_type; util::MPIPrintStreamDebug() << "Mean squared error FP: " << x_pred << ", " << x_truth << ", " << y; constexpr int block_size = 256; constexpr int thread_work_size = 8; // Assumes no halo for simplicity assert_eq(x_pred.get_local_size(), x_pred.get_local_real_size()); assert_eq(x_truth.get_local_size(), 
x_truth.get_local_real_size()); const auto num_samples = x_pred.get_local_shape()[-1]; if (num_samples == 0) return 0; y.zero(m_stream); if (x_pred.get_local_size() > 0) { auto sample_size = x_pred.get_local_size() / num_samples; auto num_blocks_per_sample = util::ceil(sample_size, (index_t) block_size * thread_work_size); dim3 bdim(block_size); dim3 gdim(num_blocks_per_sample, num_samples); const auto sample_channel_size = x_pred.get_local_shape()[x_pred.get_num_spatial_dims()]; const auto sample_spatial_size = sample_size / sample_channel_size; assert_eq(sample_channel_size * sample_spatial_size, sample_size); fp_local<DataType, block_size> <<<gdim, bdim, 0, m_stream>>>(x_pred.get_const_buffer(), x_truth.get_const_buffer(), y.get_buffer(), sample_size, sample_spatial_size, sample_channel_size, thread_work_size); } if (m_num_procs_per_sample > 1) { Al::Allreduce<Al::NCCLBackend, DataType>(y.get_buffer(), num_samples, Al::ReductionOperator::sum, *m_al.get()); } return 0; } template <typename Tensor> int MeanSquaredError<BackendDNNLib>::backward(const Tensor& x_pred, const Tensor& x_truth, Tensor& dy, Tensor& dx_pred, Tensor& dx_truth) { using DataType = typename Tensor::data_type; util::MPIPrintStreamDebug() << "Mean squared error BP: " << dy << ", " << dx_pred << ", " << dx_truth; if (m_num_procs_per_sample > 1) { const auto num_samples = x_pred.get_local_shape()[-1]; Al::Bcast<Al::NCCLBackend, DataType>( dy.get_buffer(), num_samples, 0, *m_al.get()); } constexpr int block_size = 256; constexpr int thread_work_size = 8; // Assumes no halo for simplicity assert_eq(dx_pred.get_local_size(), dx_pred.get_local_real_size()); assert_eq(dx_truth.get_local_size(), dx_truth.get_local_real_size()); if (x_pred.get_local_size() == 0) return 0; auto num_samples = x_pred.get_local_shape()[-1]; auto sample_size = x_pred.get_local_size() / num_samples; auto num_blocks_per_sample = util::ceil(sample_size, (index_t) block_size * thread_work_size); dim3 bdim(block_size); dim3 gdim(num_blocks_per_sample, num_samples); const auto sample_channel_size = x_pred.get_local_shape()[x_pred.get_num_spatial_dims()]; const auto sample_spatial_size = sample_size / sample_channel_size; assert_eq(sample_channel_size * sample_spatial_size, sample_size); bp_local<DataType, block_size> <<<gdim, bdim, 0, m_stream>>>(x_pred.get_const_buffer(), x_truth.get_const_buffer(), dy.get_const_buffer(), dx_pred.get_buffer(), dx_truth.get_buffer(), sample_size, sample_spatial_size, sample_channel_size, thread_work_size); return 0; } #define PROTO(T) \ template int \ MeanSquaredError<BackendDNNLib>::forward<TensorCUDA<T>>( \ const TensorCUDA<T>& x_pred, \ const TensorCUDA<T>& x_truth, \ TensorCUDA<T>& y); \ template int \ MeanSquaredError<BackendDNNLib>::backward<TensorCUDA<T>>( \ const TensorCUDA<T>& x_pred, \ const TensorCUDA<T>& x_truth, \ TensorCUDA<T>& dy, \ TensorCUDA<T>& dx_pred, \ TensorCUDA<T>& dx_truth); PROTO(float) PROTO(double) #undef PROTO } // namespace distconv
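In both versions above, fp_local accumulates the squared error of one sample across gridDim.x blocks (a block-local CUB reduction followed by an atomic add into y[sample]). The serial sketch below restates the quantity being computed; mse_per_sample_ref is a hypothetical helper for illustration, not part of distconv.

// Hypothetical serial reference for the per-sample loss written into y:
//   y[s] = (1 / sample_size) * sum_i (pred[s][i] - truth[s][i])^2
#include <cstddef>

template <typename T>
void mse_per_sample_ref(const T* pred, const T* truth, T* y,
                        std::size_t num_samples, std::size_t sample_size)
{
    for (std::size_t s = 0; s < num_samples; ++s) {
        T acc = T(0);
        for (std::size_t i = 0; i < sample_size; ++i) {
            const T err = pred[s * sample_size + i] - truth[s * sample_size + i];
            acc += err * err;
        }
        y[s] = acc / static_cast<T>(sample_size); // mean over the sample
    }
}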
15c484007fa3219eb9aad5979c38efc6e92da34b.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <sstream>

#include <photon_propagator/device_structs/cascade.h>
#include <photon_propagator/cascades.hpp>
#include <photon_propagator/particle.hpp>
#include <photon_propagator/photon_yield.hpp>
#include <photon_propagator/cuda/check_error.cuh>

using std::stringstream;
using std::string;
using std::cerr;
using std::endl;
using std::shared_ptr;

Cascades::Cascades(const size_t count, const shared_ptr<Device>& device):
  count_(count),
  device_(device)
{
  CHECK_ERROR(hipMalloc((void**) &__device_ptr, count*sizeof(cascade)));
};

size_t Cascades::nbytes() const {
  return host_cascades_.size()*sizeof(cascade);
}

const cascade& Cascades::at(size_t idx) const {
  return host_cascades_.at(idx);
}

size_t Cascades::n_photons() const{
  size_t result{0};
  for(auto c: host_cascades_){
    result += c.q;
  }
  return result;
}

void Cascades::to_device(){
  if(host_cascades_.size() <= count_){
    unsigned long sizeof_cascade_buffer{host_cascades_.size()*sizeof(cascade)};
    CHECK_ERROR(hipMemcpy(__device_ptr,
                          host_cascades_.data(),
                          sizeof_cascade_buffer,
                          hipMemcpyHostToDevice));
  }else{
    cerr<<"ERROR: allocated "<<count_<<" pushing "<<host_cascades_.size()<<"."<<endl;
  }
}

void Cascades::add(const particle& p){
  int itype{static_cast<int>(p.ptype)};

  cascade cscd;
  cscd.q = photon_yield::cascade::yield(p.energy, itype);
  // Position (x, y, z) and time (w)
  cscd.r.x = p.position[0];
  cscd.r.y = p.position[1];
  cscd.r.z = p.position[2];
  cscd.r.w = p.time;
  // Direction cosines
  cscd.n.x = p.direction[0];
  cscd.n.y = p.direction[1];
  cscd.n.z = p.direction[2];

  std::pair<float, float> parameters =
    photon_yield::cascade::longitudinal_profile_parameters(p.energy, itype);
  cscd.a = parameters.first;
  cscd.b = parameters.second;

  host_cascades_.push_back(cscd);
}

Cascades::~Cascades(){
  CHECK_ERROR(hipFree(__device_ptr));
};
15c484007fa3219eb9aad5979c38efc6e92da34b.cu
#include <iostream>
#include <sstream>

#include <photon_propagator/device_structs/cascade.h>
#include <photon_propagator/cascades.hpp>
#include <photon_propagator/particle.hpp>
#include <photon_propagator/photon_yield.hpp>
#include <photon_propagator/cuda/check_error.cuh>

using std::stringstream;
using std::string;
using std::cerr;
using std::endl;
using std::shared_ptr;

Cascades::Cascades(const size_t count, const shared_ptr<Device>& device):
  count_(count),
  device_(device)
{
  CHECK_ERROR(cudaMalloc((void**) &__device_ptr, count*sizeof(cascade)));
};

size_t Cascades::nbytes() const {
  return host_cascades_.size()*sizeof(cascade);
}

const cascade& Cascades::at(size_t idx) const {
  return host_cascades_.at(idx);
}

size_t Cascades::n_photons() const{
  size_t result{0};
  for(auto c: host_cascades_){
    result += c.q;
  }
  return result;
}

void Cascades::to_device(){
  if(host_cascades_.size() <= count_){
    unsigned long sizeof_cascade_buffer{host_cascades_.size()*sizeof(cascade)};
    CHECK_ERROR(cudaMemcpy(__device_ptr,
                           host_cascades_.data(),
                           sizeof_cascade_buffer,
                           cudaMemcpyHostToDevice));
  }else{
    cerr<<"ERROR: allocated "<<count_<<" pushing "<<host_cascades_.size()<<"."<<endl;
  }
}

void Cascades::add(const particle& p){
  int itype{static_cast<int>(p.ptype)};

  cascade cscd;
  cscd.q = photon_yield::cascade::yield(p.energy, itype);
  // Position (x, y, z) and time (w)
  cscd.r.x = p.position[0];
  cscd.r.y = p.position[1];
  cscd.r.z = p.position[2];
  cscd.r.w = p.time;
  // Direction cosines
  cscd.n.x = p.direction[0];
  cscd.n.y = p.direction[1];
  cscd.n.z = p.direction[2];

  std::pair<float, float> parameters =
    photon_yield::cascade::longitudinal_profile_parameters(p.energy, itype);
  cscd.a = parameters.first;
  cscd.b = parameters.second;

  host_cascades_.push_back(cscd);
}

Cascades::~Cascades(){
  CHECK_ERROR(cudaFree(__device_ptr));
};
844a4fe9a1240d3ee90c3ca4de8de16102dc7c63.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software distributed under the License // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express // or implied. See the License for the specific language governing permissions and limitations under // the License. // // // // // // // // // // // Authors: Aster JIAN ([email protected]) // Yzx ([email protected]) // Ao LI ([email protected]) // Paul LU ([email protected]) #include <hip/hip_runtime.h> #if TORCH_HIP_VERSION >= 10000 #include <NvInfer.h> #include <cassert> #include <cstring> #include <vector> #include "trt_engine/trt_network_crt/plugins/common/bert_plugin_util.h" #include "trt_engine/trt_network_crt/plugins/gelu_plugin/gelu_plugin.h" using namespace nvinfer1; namespace bert { // constants for approximating the normal cdf constexpr float A = 0.5f; constexpr float B = 0.7978845608028654f; // sqrt(2.0/M_PI) constexpr float C = 0.035677408136300125f; // 0.044715 * sqrt(2.0/M_PI) template <typename T, unsigned TPB> __global__ void geluKernel(const T a, const T b, const T c, int n, const T* input, T* output) { const int idx = blockIdx.x * TPB + threadIdx.x; if (idx < n) { const T in = input[idx]; const T cdf = a + a * tanh(in * (c * in * in + b)); output[idx] = in * cdf; } } int computeGelu(hipStream_t stream, int n, const float* input, float* output) { constexpr int blockSize = 256; const int gridSize = (n + blockSize - 1) / blockSize; hipLaunchKernelGGL(( geluKernel<float, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream, A, B, C, n, input, output); CUDA_CHECK(hipPeekAtLastError()); return 0; } int computeGelu(hipStream_t stream, int n, const half* input, half* output) { constexpr int blockSize = 256; if (0 == (n & 1)) { const int n2 = n / 2; const int gridSize = (n2 + blockSize - 1) / blockSize; const half2 A2 = __floats2half2_rn(A, A); const half2 B2 = __floats2half2_rn(B, B); const half2 C2 = __floats2half2_rn(C, C); const half2* input2 = reinterpret_cast<const half2*>(input); half2* output2 = reinterpret_cast<half2*>(output); hipLaunchKernelGGL(( geluKernel<half2, blockSize>) , dim3(gridSize), dim3(blockSize), 0, stream, A2, B2, C2, n2, input2, output2); } else { const int gridSize = (n + blockSize - 1) / blockSize; hipLaunchKernelGGL(( geluKernel<half, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream, A, B, C, n, input, output); } CUDA_CHECK(hipPeekAtLastError()); return 0; } template <typename T, int TPB> __global__ void geluBiasKernel(const T a, const T b, const T c, T* output, const T* input, const T* bias, const int ld) { const int offset = blockIdx.x * ld; for (int it = threadIdx.x; it < ld; it += TPB) { const int idx = it + offset; const T in = input[idx] + bias[it]; const T cdf = a + a * tanh(in * (c * in * in + b)); output[idx] = in * cdf; } } void computeGeluBias(float* output, const float* input, const float* bias, const int ld, const int cols, hipStream_t stream) { hipLaunchKernelGGL(( geluBiasKernel<float, 256>), dim3(cols), dim3(256), 0, stream, A, B, C, output, input, bias, ld); CUDA_CHECK(hipPeekAtLastError()); } void computeGeluBias(half* output, const half* input, const half* bias, 
const int ld, const int cols, hipStream_t stream) { if (ld & 1) { hipLaunchKernelGGL(( geluBiasKernel<half, 256>), dim3(cols), dim3(256), 0, stream, A, B, C, output, input, bias, ld); } else { const half2 A2 = __floats2half2_rn(A, A); const half2 B2 = __floats2half2_rn(B, B); const half2 C2 = __floats2half2_rn(C, C); const int ld2 = ld / 2; const half2* input2 = reinterpret_cast<const half2*>(input); const half2* bias2 = reinterpret_cast<const half2*>(bias); half2* output2 = reinterpret_cast<half2*>(output); hipLaunchKernelGGL(( geluBiasKernel<half2, 256>), dim3(cols), dim3(256), 0, stream, A2, B2, C2, output2, input2, bias2, ld2); } CUDA_CHECK(hipPeekAtLastError()); } } // namespace bert #endif // TORCH_HIP_VERSION >= 10000
844a4fe9a1240d3ee90c3ca4de8de16102dc7c63.cu
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software distributed under the License // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express // or implied. See the License for the specific language governing permissions and limitations under // the License. // // ╔════════════════════════════════════════════════════════════════════════════════════════╗ // ║──█████████╗───███████╗───████████╗───██╗──────██╗───███████╗───████████╗───████████╗───║ // ║──██╔══════╝──██╔════██╗──██╔════██╗──██║──────██║──██╔════██╗──██╔════██╗──██╔════██╗──║ // ║──████████╗───██║────██║──████████╔╝──██║──█╗──██║──█████████║──████████╔╝──██║────██║──║ // ║──██╔═════╝───██║────██║──██╔════██╗──██║█████╗██║──██╔════██║──██╔════██╗──██║────██║──║ // ║──██║─────────╚███████╔╝──██║────██║──╚████╔████╔╝──██║────██║──██║────██║──████████╔╝──║ // ║──╚═╝──────────╚══════╝───╚═╝────╚═╝───╚═══╝╚═══╝───╚═╝────╚═╝──╚═╝────╚═╝──╚═══════╝───║ // ╚════════════════════════════════════════════════════════════════════════════════════════╝ // // Authors: Aster JIAN ([email protected]) // Yzx ([email protected]) // Ao LI ([email protected]) // Paul LU ([email protected]) #include <cuda.h> #if CUDA_VERSION >= 10000 #include <NvInfer.h> #include <cassert> #include <cstring> #include <vector> #include "trt_engine/trt_network_crt/plugins/common/bert_plugin_util.h" #include "trt_engine/trt_network_crt/plugins/gelu_plugin/gelu_plugin.h" using namespace nvinfer1; namespace bert { // constants for approximating the normal cdf constexpr float A = 0.5f; constexpr float B = 0.7978845608028654f; // sqrt(2.0/M_PI) constexpr float C = 0.035677408136300125f; // 0.044715 * sqrt(2.0/M_PI) template <typename T, unsigned TPB> __global__ void geluKernel(const T a, const T b, const T c, int n, const T* input, T* output) { const int idx = blockIdx.x * TPB + threadIdx.x; if (idx < n) { const T in = input[idx]; const T cdf = a + a * tanh(in * (c * in * in + b)); output[idx] = in * cdf; } } int computeGelu(cudaStream_t stream, int n, const float* input, float* output) { constexpr int blockSize = 256; const int gridSize = (n + blockSize - 1) / blockSize; geluKernel<float, blockSize><<<gridSize, blockSize, 0, stream>>>(A, B, C, n, input, output); CUDA_CHECK(cudaPeekAtLastError()); return 0; } int computeGelu(cudaStream_t stream, int n, const half* input, half* output) { constexpr int blockSize = 256; if (0 == (n & 1)) { const int n2 = n / 2; const int gridSize = (n2 + blockSize - 1) / blockSize; const half2 A2 = __floats2half2_rn(A, A); const half2 B2 = __floats2half2_rn(B, B); const half2 C2 = __floats2half2_rn(C, C); const half2* input2 = reinterpret_cast<const half2*>(input); half2* output2 = reinterpret_cast<half2*>(output); geluKernel<half2, blockSize> <<<gridSize, blockSize, 0, stream>>>(A2, B2, C2, n2, input2, output2); } else { const int gridSize = (n + blockSize - 1) / blockSize; geluKernel<half, blockSize><<<gridSize, blockSize, 0, stream>>>(A, B, C, n, input, output); } CUDA_CHECK(cudaPeekAtLastError()); return 0; } template <typename T, int TPB> __global__ void geluBiasKernel(const T a, const T b, const T c, T* output, const T* input, const T* bias, const int ld) { const int offset = 
blockIdx.x * ld; for (int it = threadIdx.x; it < ld; it += TPB) { const int idx = it + offset; const T in = input[idx] + bias[it]; const T cdf = a + a * tanh(in * (c * in * in + b)); output[idx] = in * cdf; } } void computeGeluBias(float* output, const float* input, const float* bias, const int ld, const int cols, cudaStream_t stream) { geluBiasKernel<float, 256><<<cols, 256, 0, stream>>>(A, B, C, output, input, bias, ld); CUDA_CHECK(cudaPeekAtLastError()); } void computeGeluBias(half* output, const half* input, const half* bias, const int ld, const int cols, cudaStream_t stream) { if (ld & 1) { geluBiasKernel<half, 256><<<cols, 256, 0, stream>>>(A, B, C, output, input, bias, ld); } else { const half2 A2 = __floats2half2_rn(A, A); const half2 B2 = __floats2half2_rn(B, B); const half2 C2 = __floats2half2_rn(C, C); const int ld2 = ld / 2; const half2* input2 = reinterpret_cast<const half2*>(input); const half2* bias2 = reinterpret_cast<const half2*>(bias); half2* output2 = reinterpret_cast<half2*>(output); geluBiasKernel<half2, 256><<<cols, 256, 0, stream>>>(A2, B2, C2, output2, input2, bias2, ld2); } CUDA_CHECK(cudaPeekAtLastError()); } } // namespace bert #endif // CUDA_VERSION >= 10000
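The constants A, B and C in both files encode the tanh approximation of the Gaussian CDF, so geluKernel computes gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))). A scalar host reference (a hypothetical helper, useful only for spot-checking the kernels) is:

// Scalar reference for the tanh-based GELU used by geluKernel / geluBiasKernel.
#include <cmath>

inline float gelu_ref(float x)
{
    const float A = 0.5f;                    // 1/2
    const float B = 0.7978845608028654f;     // sqrt(2/pi)
    const float C = 0.035677408136300125f;   // 0.044715 * sqrt(2/pi)
    // x * (A + A*tanh(x*(C*x*x + B))) == 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3)))
    return x * (A + A * std::tanh(x * (C * x * x + B)));
}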
1a916cd83726ee67a760ee0efdde8b4912347d72.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <cstdlib> #include <ctime> #include <algorithm> /* TODOs Wrong result when size != 2^n Cannot handle (wrong result) when size is huge */ using namespace std; static void HandleError(hipError_t err, const char *file, int line) { if (err != hipSuccess) { printf("%s in %s at line %d\n", hipGetErrorString(err), file, line); system("pause"); exit(EXIT_FAILURE); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) //#define CHECK_RESULTS_OUTPUT __global__ void GenerateHistogramAndPredicate(int *input, int *currentBit, int *numBits, int *bitHistogram, int *predicate, int *size, int numBitsPow2) { int id = blockIdx.x * blockDim.x + threadIdx.x; int bid = blockIdx.x; if (id >= (*size)) { return; } extern __shared__ int localBin[]; if (threadIdx.x == 0) { for(int i = 0; i < numBitsPow2; i++) localBin[i] = 0; } __syncthreads(); int bit = (input[id] >> (*currentBit)) & ((1 << *numBits) - 1); //atomicAdd(&(bitHistogram[bit * gridDim.x + blockIdx.x]), 1); atomicAdd(&localBin[bit], 1); predicate[bit * (*size) + id] = 1; __syncthreads(); if (threadIdx.x == 0) { for(int i = 0; i < numBitsPow2; i++) bitHistogram[i * gridDim.x + bid] = localBin[i]; } __syncthreads(); } __global__ void PrefixSum(int *input, int *output, int *size, int *totalBits) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= (*size)) { return; } int bit = 0; for (bit = 0; bit < (*totalBits); bit++) { int current_value = input[bit * (*size) + id]; int current_cdf = input[bit * (*size) + id]; for (unsigned int interval = 1; interval < blockDim.x; interval <<= 1) { if (threadIdx.x >= interval) { current_cdf += input[bit * (*size) + id - interval]; } __syncthreads(); input[bit * (*size) + id] = current_cdf; __syncthreads(); } output[bit * (*size) + id] = input[bit * (*size) + id] - current_value; __syncthreads(); } } __global__ void PrefixSum_GPUGems(int *g_odata, int *g_idata, int totalSize, int n, int numBitPow2) { extern __shared__ int temp[]; // allocated on invocation int thid = threadIdx.x; int id = blockIdx.x * blockDim.x + threadIdx.x; bool isEnd = ((2 * id + 1) >= totalSize); if (2 * id >= totalSize || 2 * thid >= n) { return; } for (int startOffset = 0; startOffset < numBitPow2; startOffset++) { int offset = 1; temp[2 * thid] = g_idata[startOffset * totalSize + 2 * id]; // load input into shared memory temp[2 * thid + 1] = isEnd ? 
0 : g_idata[startOffset * totalSize + 2 * id + 1]; for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (thid < d) { int ai = offset*(2 * thid + 1) - 1; int bi = offset*(2 * thid + 2) - 1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) { temp[n - 1] = 0; } // clear the last element for (int d = 1; d < n; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset*(2 * thid + 1) - 1; int bi = offset*(2 * thid + 2) - 1; int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[startOffset * totalSize + 2 * id] = temp[2 * thid]; // write results to device memory if (!isEnd) g_odata[startOffset * totalSize + 2 * id + 1] = temp[2 * thid + 1]; } } __global__ void ReOrder(int *input, int *output, int *bitScan, int *relativePos, int *currentBit, int *numBits, int *size) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= (*size)) { return; } int bit = (input[id] >> (*currentBit)) & ((1 << *numBits) - 1); output[relativePos[bit * (*size) + id] + bitScan[bit * gridDim.x + blockIdx.x]] = input[id]; } int pow(int a, int b) { int result = 1; for (int i = 0; i < b; i++) { result *= a; } return result; } const int arraySize = 500000, gridSize = 1024; const int gridCount = ceil((float)arraySize / (float)gridSize); int input[arraySize] = { 0 }; int output[arraySize] = { 0 }; int main() { const int totalBits = 20, numBits = 1; const int numBitsPow2 = pow(2, numBits); int sizeBitScan = numBitsPow2 * gridCount; int one = 1; int tmp_bitHistogram[32] = { 0 }; printf("Data generation...\n===============\n| Range: 0 ~ %d\n| Size: %d\n| GSize: %d\n===============\n\n", pow(2, totalBits), arraySize, gridSize); //init data srand(time(0)); for (int i = 0; i < arraySize; i++) { input[i] = rand() % (pow(2, totalBits) - 1); } printf("Sending data to GPU...\n"); //Input: arraySize the input array //Output: arraySize result //currentBit: 1 current bit pos //bitLenth: 1 current bit lenth (numBits) //bitHistogram: 2^numBits count of items with value i at current bit //bitScan: 2^numBits prefix sum of bitHistogram //predicate: arraySize * 2^numBits T/F if item value equals to i at current bit //relativePos: arraySize * 2^numBits prefix sum of predicate //size: 1 arraySize int *d_Input = 0, *d_Output = 0, *d_bitHistogram = 0, *d_bitScan = 0, *d_predicate = 0, *d_relativePos = 0, *d_currentBit = 0, *d_bitLenth = 0, *d_size = 0, *d_sizeBitScan = 0, *d_one = 0, *d_bitLenthPow2 = 0; // Choose which GPU to run on, change this on a multi-GPU system. HANDLE_ERROR(hipSetDevice(0)); // Allocate GPU buffers HANDLE_ERROR(hipMalloc((void**)&d_Output, arraySize * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&d_Input, arraySize * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&d_bitHistogram, gridCount * numBitsPow2 * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&d_bitScan, gridCount * numBitsPow2 * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&d_predicate, arraySize * numBitsPow2 * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&d_relativePos, arraySize * numBitsPow2 * sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&d_currentBit, sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&d_bitLenth, sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&d_size, sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&d_sizeBitScan, sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&d_one, sizeof(int))); HANDLE_ERROR(hipMalloc((void**)&d_bitLenthPow2, sizeof(int))); // Copy input vectors from host memory to GPU buffers. 
HANDLE_ERROR(hipMemcpy(d_Input, input, arraySize * sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(d_bitLenth, &numBits, sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(d_size, &arraySize, sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(d_sizeBitScan, &sizeBitScan, sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(d_bitLenthPow2, &numBitsPow2, sizeof(int), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(d_one, &one, sizeof(int), hipMemcpyHostToDevice)); printf("GPU Sort Started!\n"); std::clock_t start; start = std::clock(); //Do the sort for (int i = 0; i < totalBits; i += numBits) { //update current bit HANDLE_ERROR(hipMemcpy(d_currentBit, &i, sizeof(int), hipMemcpyHostToDevice)); //clear buffers HANDLE_ERROR(hipMemset(d_bitHistogram, 0, gridCount * numBitsPow2 * sizeof(int))); HANDLE_ERROR(hipMemset(d_bitScan, 0, gridCount * numBitsPow2 * sizeof(int))); HANDLE_ERROR(hipMemset(d_predicate, 0, numBitsPow2 * arraySize * sizeof(int))); HANDLE_ERROR(hipMemset(d_relativePos, 0, numBitsPow2 * arraySize * sizeof(int))); #ifdef CHECK_RESULTS_OUTPUT //check results HANDLE_ERROR(hipMemcpy(output, d_Input, arraySize * sizeof(int), hipMemcpyDeviceToHost)); printf("Input:\t"); for (int i = 0; i < arraySize; i++) { printf("%d ", output[i]); } printf("\n"); #endif ///////////////// hipLaunchKernelGGL(( GenerateHistogramAndPredicate) , dim3(gridCount), dim3(gridSize), numBitsPow2 * sizeof(unsigned int) , 0, d_Input, d_currentBit, d_bitLenth, d_bitHistogram, d_predicate, d_size, numBitsPow2); #ifdef CHECK_RESULTS_OUTPUT //check results HANDLE_ERROR(hipDeviceSynchronize()); HANDLE_ERROR(hipMemcpy(tmp_bitHistogram, d_bitHistogram, numBitsPow2 * gridCount * sizeof(int), hipMemcpyDeviceToHost)); printf("Bit %d:\t", i); for (int j = 0; j < gridCount; j++) { for (int k = 0; k < numBitsPow2; k++) { printf("%d ", tmp_bitHistogram[j * numBitsPow2 + k]); } printf("| "); } printf("\n"); #endif ///////////////// hipLaunchKernelGGL(( PrefixSum) , dim3(1), dim3(numBitsPow2 * gridCount) , 0, 0, d_bitHistogram, d_bitScan, d_sizeBitScan, d_one); #ifdef CHECK_RESULTS_OUTPUT //check results HANDLE_ERROR(hipDeviceSynchronize()); HANDLE_ERROR(hipMemcpy(tmp_bitHistogram, d_bitScan, numBitsPow2 * gridCount * sizeof(int), hipMemcpyDeviceToHost)); printf("Scan %d:\t", i); for (int j = 0; j < gridCount; j++) { for (int k = 0; k < numBitsPow2; k++) { printf("%d ", tmp_bitHistogram[j * numBitsPow2 + k]); } printf("| "); } printf("\n"); #endif ///////////////// #ifdef CHECK_RESULTS_OUTPUT //check results HANDLE_ERROR(hipMemcpy(tmp_bitHistogram, d_predicate, numBitsPow2 * arraySize * sizeof(int), hipMemcpyDeviceToHost)); printf("Pred %d:\t", i); for (int j = 0; j < numBitsPow2; j++) { for (int k = 0; k < arraySize; k++) { printf("%d ", tmp_bitHistogram[j * arraySize + k]); } printf("| "); } printf("\n"); #endif ///////////////// //PrefixSum <<< gridCount, gridSize >>> (d_relativePos, d_predicate, d_size, d_bitLenthPow2); hipLaunchKernelGGL(( PrefixSum_GPUGems) , dim3(gridCount), dim3(gridSize / 2), gridSize * sizeof(int) , 0, d_relativePos, d_predicate, arraySize, gridSize, numBitsPow2); #ifdef CHECK_RESULTS_OUTPUT //check results HANDLE_ERROR(hipDeviceSynchronize()); HANDLE_ERROR(hipMemcpy(tmp_bitHistogram, d_relativePos, numBitsPow2 * arraySize * sizeof(int), hipMemcpyDeviceToHost)); printf("RPos %d:\t", i); for (int j = 0; j < numBitsPow2; j++) { for (int k = 0; k < arraySize; k++) { printf("%d ", tmp_bitHistogram[j * arraySize + k]); } printf("| "); } printf("\n"); #endif 
///////////////// hipLaunchKernelGGL(( ReOrder) , dim3(gridCount), dim3(gridSize) , 0, 0, d_Input, d_Output, d_bitScan, d_relativePos, d_currentBit, d_bitLenth, d_size); #ifdef CHECK_RESULTS_OUTPUT //check results HANDLE_ERROR(hipDeviceSynchronize()); HANDLE_ERROR(hipMemcpy(output, d_Output, arraySize * sizeof(int), hipMemcpyDeviceToHost)); printf("Output:\t"); for (int i = 0; i < arraySize; i++) { printf("%d ", output[i]); } printf("\n*--*--*--*--*--*\n"); #endif HANDLE_ERROR(hipDeviceSynchronize()); ///////////////// //Swap input and output for next iter int* tmp = d_Input; d_Input = d_Output; d_Output = tmp; //printf("\n*-*-*-*-*-*-*\n"); } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. HANDLE_ERROR(hipDeviceSynchronize()); double duration = (std::clock() - start) / (double)CLOCKS_PER_SEC; printf("\nGPU Sort Finished! time cost (ms): %.3lf\n\n", duration * 1000.0); printf("Collecting results...\n"); HANDLE_ERROR(hipMemcpy(output, d_Input, arraySize * sizeof(int), hipMemcpyDeviceToHost)); printf("Checking results...\n\n"); bool validate = true, iszero = true; for (int i = 1; i < arraySize; i++) { if (output[i - 1] > output[i]) { validate = false; } if (output[i] != 0) { iszero = false; } } if (iszero) { validate = false; printf("* Result is full of zero!\n* CHECK the GPU part.\n\n"); } if (validate) { printf("Correct!\n"); } else { printf("Wrong...!\n"); } printf("\n==*==*==*==\nCPU Sort Started!\n"); start = std::clock(); std::sort(input, input + arraySize); duration = (std::clock() - start) / (double)CLOCKS_PER_SEC; printf("\nCPU Sort Finished! time cost (ms): %.3lf\n\n", duration * 1000.0); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. HANDLE_ERROR(hipDeviceReset()); hipFree(d_Input); hipFree(d_Output); hipFree(d_bitHistogram); hipFree(d_bitScan); hipFree(d_predicate); hipFree(d_relativePos); hipFree(d_currentBit); hipFree(d_bitLenth); hipFree(d_size); system("pause"); return 0; }
1a916cd83726ee67a760ee0efdde8b4912347d72.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <cstdlib> #include <ctime> #include <algorithm> /* TODOs Wrong result when size != 2^n Cannot handle (wrong result) when size is huge */ using namespace std; static void HandleError(cudaError_t err, const char *file, int line) { if (err != cudaSuccess) { printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line); system("pause"); exit(EXIT_FAILURE); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) //#define CHECK_RESULTS_OUTPUT __global__ void GenerateHistogramAndPredicate(int *input, int *currentBit, int *numBits, int *bitHistogram, int *predicate, int *size, int numBitsPow2) { int id = blockIdx.x * blockDim.x + threadIdx.x; int bid = blockIdx.x; if (id >= (*size)) { return; } extern __shared__ int localBin[]; if (threadIdx.x == 0) { for(int i = 0; i < numBitsPow2; i++) localBin[i] = 0; } __syncthreads(); int bit = (input[id] >> (*currentBit)) & ((1 << *numBits) - 1); //atomicAdd(&(bitHistogram[bit * gridDim.x + blockIdx.x]), 1); atomicAdd(&localBin[bit], 1); predicate[bit * (*size) + id] = 1; __syncthreads(); if (threadIdx.x == 0) { for(int i = 0; i < numBitsPow2; i++) bitHistogram[i * gridDim.x + bid] = localBin[i]; } __syncthreads(); } __global__ void PrefixSum(int *input, int *output, int *size, int *totalBits) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= (*size)) { return; } int bit = 0; for (bit = 0; bit < (*totalBits); bit++) { int current_value = input[bit * (*size) + id]; int current_cdf = input[bit * (*size) + id]; for (unsigned int interval = 1; interval < blockDim.x; interval <<= 1) { if (threadIdx.x >= interval) { current_cdf += input[bit * (*size) + id - interval]; } __syncthreads(); input[bit * (*size) + id] = current_cdf; __syncthreads(); } output[bit * (*size) + id] = input[bit * (*size) + id] - current_value; __syncthreads(); } } __global__ void PrefixSum_GPUGems(int *g_odata, int *g_idata, int totalSize, int n, int numBitPow2) { extern __shared__ int temp[]; // allocated on invocation int thid = threadIdx.x; int id = blockIdx.x * blockDim.x + threadIdx.x; bool isEnd = ((2 * id + 1) >= totalSize); if (2 * id >= totalSize || 2 * thid >= n) { return; } for (int startOffset = 0; startOffset < numBitPow2; startOffset++) { int offset = 1; temp[2 * thid] = g_idata[startOffset * totalSize + 2 * id]; // load input into shared memory temp[2 * thid + 1] = isEnd ? 
0 : g_idata[startOffset * totalSize + 2 * id + 1]; for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree { __syncthreads(); if (thid < d) { int ai = offset*(2 * thid + 1) - 1; int bi = offset*(2 * thid + 2) - 1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) { temp[n - 1] = 0; } // clear the last element for (int d = 1; d < n; d *= 2) // traverse down tree & build scan { offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset*(2 * thid + 1) - 1; int bi = offset*(2 * thid + 2) - 1; int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[startOffset * totalSize + 2 * id] = temp[2 * thid]; // write results to device memory if (!isEnd) g_odata[startOffset * totalSize + 2 * id + 1] = temp[2 * thid + 1]; } } __global__ void ReOrder(int *input, int *output, int *bitScan, int *relativePos, int *currentBit, int *numBits, int *size) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id >= (*size)) { return; } int bit = (input[id] >> (*currentBit)) & ((1 << *numBits) - 1); output[relativePos[bit * (*size) + id] + bitScan[bit * gridDim.x + blockIdx.x]] = input[id]; } int pow(int a, int b) { int result = 1; for (int i = 0; i < b; i++) { result *= a; } return result; } const int arraySize = 500000, gridSize = 1024; const int gridCount = ceil((float)arraySize / (float)gridSize); int input[arraySize] = { 0 }; int output[arraySize] = { 0 }; int main() { const int totalBits = 20, numBits = 1; const int numBitsPow2 = pow(2, numBits); int sizeBitScan = numBitsPow2 * gridCount; int one = 1; int tmp_bitHistogram[32] = { 0 }; printf("Data generation...\n===============\n| Range: 0 ~ %d\n| Size: %d\n| GSize: %d\n===============\n\n", pow(2, totalBits), arraySize, gridSize); //init data srand(time(0)); for (int i = 0; i < arraySize; i++) { input[i] = rand() % (pow(2, totalBits) - 1); } printf("Sending data to GPU...\n"); //Input: arraySize the input array //Output: arraySize result //currentBit: 1 current bit pos //bitLenth: 1 current bit lenth (numBits) //bitHistogram: 2^numBits count of items with value i at current bit //bitScan: 2^numBits prefix sum of bitHistogram //predicate: arraySize * 2^numBits T/F if item value equals to i at current bit //relativePos: arraySize * 2^numBits prefix sum of predicate //size: 1 arraySize int *d_Input = 0, *d_Output = 0, *d_bitHistogram = 0, *d_bitScan = 0, *d_predicate = 0, *d_relativePos = 0, *d_currentBit = 0, *d_bitLenth = 0, *d_size = 0, *d_sizeBitScan = 0, *d_one = 0, *d_bitLenthPow2 = 0; // Choose which GPU to run on, change this on a multi-GPU system. HANDLE_ERROR(cudaSetDevice(0)); // Allocate GPU buffers HANDLE_ERROR(cudaMalloc((void**)&d_Output, arraySize * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&d_Input, arraySize * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&d_bitHistogram, gridCount * numBitsPow2 * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&d_bitScan, gridCount * numBitsPow2 * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&d_predicate, arraySize * numBitsPow2 * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&d_relativePos, arraySize * numBitsPow2 * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&d_currentBit, sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&d_bitLenth, sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&d_size, sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&d_sizeBitScan, sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&d_one, sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&d_bitLenthPow2, sizeof(int))); // Copy input vectors from host memory to GPU buffers. 
HANDLE_ERROR(cudaMemcpy(d_Input, input, arraySize * sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_bitLenth, &numBits, sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_size, &arraySize, sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_sizeBitScan, &sizeBitScan, sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_bitLenthPow2, &numBitsPow2, sizeof(int), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_one, &one, sizeof(int), cudaMemcpyHostToDevice)); printf("GPU Sort Started!\n"); std::clock_t start; start = std::clock(); //Do the sort for (int i = 0; i < totalBits; i += numBits) { //update current bit HANDLE_ERROR(cudaMemcpy(d_currentBit, &i, sizeof(int), cudaMemcpyHostToDevice)); //clear buffers HANDLE_ERROR(cudaMemset(d_bitHistogram, 0, gridCount * numBitsPow2 * sizeof(int))); HANDLE_ERROR(cudaMemset(d_bitScan, 0, gridCount * numBitsPow2 * sizeof(int))); HANDLE_ERROR(cudaMemset(d_predicate, 0, numBitsPow2 * arraySize * sizeof(int))); HANDLE_ERROR(cudaMemset(d_relativePos, 0, numBitsPow2 * arraySize * sizeof(int))); #ifdef CHECK_RESULTS_OUTPUT //check results HANDLE_ERROR(cudaMemcpy(output, d_Input, arraySize * sizeof(int), cudaMemcpyDeviceToHost)); printf("Input:\t"); for (int i = 0; i < arraySize; i++) { printf("%d ", output[i]); } printf("\n"); #endif ///////////////// GenerateHistogramAndPredicate <<< gridCount, gridSize, numBitsPow2 * sizeof(unsigned int) >>> (d_Input, d_currentBit, d_bitLenth, d_bitHistogram, d_predicate, d_size, numBitsPow2); #ifdef CHECK_RESULTS_OUTPUT //check results HANDLE_ERROR(cudaDeviceSynchronize()); HANDLE_ERROR(cudaMemcpy(tmp_bitHistogram, d_bitHistogram, numBitsPow2 * gridCount * sizeof(int), cudaMemcpyDeviceToHost)); printf("Bit %d:\t", i); for (int j = 0; j < gridCount; j++) { for (int k = 0; k < numBitsPow2; k++) { printf("%d ", tmp_bitHistogram[j * numBitsPow2 + k]); } printf("| "); } printf("\n"); #endif ///////////////// PrefixSum <<< 1, numBitsPow2 * gridCount >>> (d_bitHistogram, d_bitScan, d_sizeBitScan, d_one); #ifdef CHECK_RESULTS_OUTPUT //check results HANDLE_ERROR(cudaDeviceSynchronize()); HANDLE_ERROR(cudaMemcpy(tmp_bitHistogram, d_bitScan, numBitsPow2 * gridCount * sizeof(int), cudaMemcpyDeviceToHost)); printf("Scan %d:\t", i); for (int j = 0; j < gridCount; j++) { for (int k = 0; k < numBitsPow2; k++) { printf("%d ", tmp_bitHistogram[j * numBitsPow2 + k]); } printf("| "); } printf("\n"); #endif ///////////////// #ifdef CHECK_RESULTS_OUTPUT //check results HANDLE_ERROR(cudaMemcpy(tmp_bitHistogram, d_predicate, numBitsPow2 * arraySize * sizeof(int), cudaMemcpyDeviceToHost)); printf("Pred %d:\t", i); for (int j = 0; j < numBitsPow2; j++) { for (int k = 0; k < arraySize; k++) { printf("%d ", tmp_bitHistogram[j * arraySize + k]); } printf("| "); } printf("\n"); #endif ///////////////// //PrefixSum <<< gridCount, gridSize >>> (d_relativePos, d_predicate, d_size, d_bitLenthPow2); PrefixSum_GPUGems <<< gridCount, gridSize / 2, gridSize * sizeof(int) >>> (d_relativePos, d_predicate, arraySize, gridSize, numBitsPow2); #ifdef CHECK_RESULTS_OUTPUT //check results HANDLE_ERROR(cudaDeviceSynchronize()); HANDLE_ERROR(cudaMemcpy(tmp_bitHistogram, d_relativePos, numBitsPow2 * arraySize * sizeof(int), cudaMemcpyDeviceToHost)); printf("RPos %d:\t", i); for (int j = 0; j < numBitsPow2; j++) { for (int k = 0; k < arraySize; k++) { printf("%d ", tmp_bitHistogram[j * arraySize + k]); } printf("| "); } printf("\n"); #endif ///////////////// ReOrder <<< gridCount, gridSize >>> (d_Input, d_Output, 
d_bitScan, d_relativePos, d_currentBit, d_bitLenth, d_size); #ifdef CHECK_RESULTS_OUTPUT //check results HANDLE_ERROR(cudaDeviceSynchronize()); HANDLE_ERROR(cudaMemcpy(output, d_Output, arraySize * sizeof(int), cudaMemcpyDeviceToHost)); printf("Output:\t"); for (int i = 0; i < arraySize; i++) { printf("%d ", output[i]); } printf("\n*--*--*--*--*--*\n"); #endif HANDLE_ERROR(cudaDeviceSynchronize()); ///////////////// //Swap input and output for next iter int* tmp = d_Input; d_Input = d_Output; d_Output = tmp; //printf("\n*-*-*-*-*-*-*\n"); } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. HANDLE_ERROR(cudaDeviceSynchronize()); double duration = (std::clock() - start) / (double)CLOCKS_PER_SEC; printf("\nGPU Sort Finished! time cost (ms): %.3lf\n\n", duration * 1000.0); printf("Collecting results...\n"); HANDLE_ERROR(cudaMemcpy(output, d_Input, arraySize * sizeof(int), cudaMemcpyDeviceToHost)); printf("Checking results...\n\n"); bool validate = true, iszero = true; for (int i = 1; i < arraySize; i++) { if (output[i - 1] > output[i]) { validate = false; } if (output[i] != 0) { iszero = false; } } if (iszero) { validate = false; printf("* Result is full of zero!\n* CHECK the GPU part.\n\n"); } if (validate) { printf("Correct!\n"); } else { printf("Wrong...!\n"); } printf("\n==*==*==*==\nCPU Sort Started!\n"); start = std::clock(); std::sort(input, input + arraySize); duration = (std::clock() - start) / (double)CLOCKS_PER_SEC; printf("\nCPU Sort Finished! time cost (ms): %.3lf\n\n", duration * 1000.0); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. HANDLE_ERROR(cudaDeviceReset()); cudaFree(d_Input); cudaFree(d_Output); cudaFree(d_bitHistogram); cudaFree(d_bitScan); cudaFree(d_predicate); cudaFree(d_relativePos); cudaFree(d_currentBit); cudaFree(d_bitLenth); cudaFree(d_size); system("pause"); return 0; }
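Each iteration of the sort loop above is one pass of a least-significant-digit radix sort on numBits bits: a per-block histogram, an exclusive scan of the histogram and of the per-element predicates, and a stable reorder. The CPU sketch below performs the same single pass serially; radix_pass_ref is a hypothetical helper for checking one GPU iteration, not part of the original program.

// Hypothetical stable counting-sort pass on `numBits` bits starting at `currentBit`.
#include <vector>

static void radix_pass_ref(const std::vector<int>& in, std::vector<int>& out,
                           int currentBit, int numBits)
{
    const int buckets = 1 << numBits;
    std::vector<int> count(buckets, 0), offset(buckets, 0);
    for (int v : in)                              // histogram of the current digit
        ++count[(v >> currentBit) & (buckets - 1)];
    for (int b = 1; b < buckets; ++b)             // exclusive scan of the histogram
        offset[b] = offset[b - 1] + count[b - 1];
    out.assign(in.size(), 0);
    for (int v : in)                              // stable scatter by digit
        out[offset[(v >> currentBit) & (buckets - 1)]++] = v;
}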
8d359a2f2a39953e1863cc5099373cfc5670dda1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * _reg_bspline_gpu.cu * * * Created by Marc Modat on 24/03/2009. * Copyright (c) 2009, University College London. All rights reserved. * Centre for Medical Image Computing (CMIC) * See the LICENSE.txt file in the nifty_reg root folder * */ #ifndef _REG_BSPLINE_GPU_CU #define _REG_BSPLINE_GPU_CU #include "_reg_localTransformation_gpu.h" #include "_reg_localTransformation_kernels.cu" #include <sys/time.h> /* *************************************************************** */ /* *************************************************************** */ void reg_bspline_gpu(nifti_image *controlPointImage, nifti_image *reference, float4 **controlPointImageArray_d, float4 **positionFieldImageArray_d, int **mask_d, int activeVoxelNumber, bool bspline) { const int voxelNumber = reference->nx * reference->ny * reference->nz; const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 referenceImageDim = make_int3(reference->nx, reference->ny, reference->nz); const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const int useBSpline = bspline; const float3 controlPointVoxelSpacing = make_float3( controlPointImage->dx / reference->dx, controlPointImage->dy / reference->dy, controlPointImage->dz / reference->dz); struct timeval t1, t2; double elapsedTime; gettimeofday(&t1, NULL); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_UseBSpline,&useBSpline,sizeof(int))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ActiveVoxelNumber,&activeVoxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(hipBindTexture(0, controlPointTexture, *controlPointImageArray_d, controlPointNumber*sizeof(float4))) NR_CUDA_SAFE_CALL(hipBindTexture(0, maskTexture, *mask_d, activeVoxelNumber*sizeof(int))) const unsigned int Grid_reg_bspline_getDeformationField = (unsigned int)ceilf(sqrtf((float)activeVoxelNumber/(float)(Block_reg_bspline_getDeformationField))); dim3 G1(Grid_reg_bspline_getDeformationField,Grid_reg_bspline_getDeformationField,1); dim3 B1(Block_reg_bspline_getDeformationField,1,1); hipLaunchKernelGGL(( reg_bspline_getDeformationField) , dim3(G1), dim3(B1) , 0, 0, *positionFieldImageArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) NR_CUDA_SAFE_CALL(hipUnbindTexture(controlPointTexture)) NR_CUDA_SAFE_CALL(hipUnbindTexture(maskTexture)) gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; printf("[NiftyReg F3D] reg_bspline_getDeformationField time =%f msec\n", elapsedTime); //printf("[NiftyReg F3D] reg_bspline_getDeformationField throughput =%f voxel per sec\n", (activeVoxelNumber*1000)/elapsedTime); return; } /* *************************************************************** */ /* *************************************************************** */ float reg_bspline_ApproxBendingEnergy_gpu(nifti_image *controlPointImage, float4 **controlPointImageArray_d) { const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 controlPointImageDim = make_int3(controlPointImage->nx, 
controlPointImage->ny, controlPointImage->nz); const int controlPointGridMem = controlPointNumber*sizeof(float4); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(hipBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointGridMem)) // First compute all the second derivatives float4 *secondDerivativeValues_d; NR_CUDA_SAFE_CALL(hipMalloc(&secondDerivativeValues_d, 6*controlPointGridMem)) const unsigned int Grid_bspline_getApproxSecondDerivatives = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_getApproxSecondDerivatives))); dim3 G1(Grid_bspline_getApproxSecondDerivatives,Grid_bspline_getApproxSecondDerivatives,1); dim3 B1(Block_reg_bspline_getApproxSecondDerivatives,1,1); hipLaunchKernelGGL(( reg_bspline_getApproxSecondDerivatives) , dim3(G1), dim3(B1) , 0, 0, secondDerivativeValues_d); NR_CUDA_CHECK_KERNEL(G1,B1) NR_CUDA_SAFE_CALL(hipUnbindTexture(controlPointTexture)) // Compute the bending energy from the second derivatives float *penaltyTerm_d; NR_CUDA_SAFE_CALL(hipMalloc(&penaltyTerm_d, controlPointNumber*sizeof(float))) NR_CUDA_SAFE_CALL(hipBindTexture(0,secondDerivativesTexture, secondDerivativeValues_d, 6*controlPointGridMem)) const unsigned int Grid_reg_bspline_ApproxBendingEnergy = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_getApproxBendingEnergy))); dim3 G2(Grid_reg_bspline_ApproxBendingEnergy,Grid_reg_bspline_ApproxBendingEnergy,1); dim3 B2(Block_reg_bspline_getApproxBendingEnergy,1,1); hipLaunchKernelGGL(( reg_bspline_getApproxBendingEnergy_kernel) , dim3(G2), dim3(B2) , 0, 0, penaltyTerm_d); NR_CUDA_CHECK_KERNEL(G2,B2) NR_CUDA_SAFE_CALL(hipUnbindTexture(secondDerivativesTexture)) NR_CUDA_SAFE_CALL(hipFree(secondDerivativeValues_d)) // Transfert the vales back to the CPU and average them float *penaltyTerm_h; NR_CUDA_SAFE_CALL(hipHostMalloc(&penaltyTerm_h, controlPointNumber*sizeof(float))) NR_CUDA_SAFE_CALL(hipMemcpy(penaltyTerm_h, penaltyTerm_d, controlPointNumber*sizeof(float), hipMemcpyDeviceToHost)) NR_CUDA_SAFE_CALL(hipFree(penaltyTerm_d)) double penaltyValue=0.0; for(int i=0;i<controlPointNumber;i++) penaltyValue += penaltyTerm_h[i]; NR_CUDA_SAFE_CALL(hipHostFree((void *)penaltyTerm_h)) return (float)(penaltyValue/(3.0*(double)controlPointNumber)); } /* *************************************************************** */ /* *************************************************************** */ void reg_bspline_ApproxBendingEnergyGradient_gpu(nifti_image *referenceImage, nifti_image *controlPointImage, float4 **controlPointImageArray_d, float4 **nodeNMIGradientArray_d, float bendingEnergyWeight) { const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const int controlPointGridMem = controlPointNumber*sizeof(float4); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(hipBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointGridMem)) // First compute all the second derivatives float4 *secondDerivativeValues_d; NR_CUDA_SAFE_CALL(hipMalloc(&secondDerivativeValues_d, 6*controlPointNumber*sizeof(float4))) const unsigned int 
Grid_bspline_getApproxSecondDerivatives = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_getApproxSecondDerivatives))); dim3 G1(Grid_bspline_getApproxSecondDerivatives,Grid_bspline_getApproxSecondDerivatives,1); dim3 B1(Block_reg_bspline_getApproxSecondDerivatives,1,1); hipLaunchKernelGGL(( reg_bspline_getApproxSecondDerivatives) , dim3(G1), dim3(B1) , 0, 0, secondDerivativeValues_d); NR_CUDA_CHECK_KERNEL(G1,B1) NR_CUDA_SAFE_CALL(hipUnbindTexture(controlPointTexture)) // Compute the gradient bendingEnergyWeight *= referenceImage->nx*referenceImage->ny*referenceImage->nz / (controlPointImage->nx*controlPointImage->ny*controlPointImage->nz); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_Weight,&bendingEnergyWeight,sizeof(float))) NR_CUDA_SAFE_CALL(hipBindTexture(0,secondDerivativesTexture, secondDerivativeValues_d, 6*controlPointNumber*sizeof(float4))) const unsigned int Grid_reg_bspline_getApproxBendingEnergyGradient = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_getApproxBendingEnergyGradient))); dim3 G2(Grid_reg_bspline_getApproxBendingEnergyGradient,Grid_reg_bspline_getApproxBendingEnergyGradient,1); dim3 B2(Block_reg_bspline_getApproxBendingEnergyGradient,1,1); hipLaunchKernelGGL(( reg_bspline_getApproxBendingEnergyGradient_kernel) , dim3(G2), dim3(B2) , 0, 0, *nodeNMIGradientArray_d); NR_CUDA_CHECK_KERNEL(G2,B2) NR_CUDA_SAFE_CALL(hipUnbindTexture(secondDerivativesTexture)) NR_CUDA_SAFE_CALL(hipFree(secondDerivativeValues_d)) return; } /* *************************************************************** */ /* *************************************************************** */ void reg_bspline_ComputeApproxJacobianValues(nifti_image *controlPointImage, float4 **controlPointImageArray_d, float **jacobianMatrices_d, float **jacobianDet_d) { // Need to reorient the Jacobian matrix using the header information - real to voxel conversion mat33 reorient, desorient; reg_getReorientationMatrix(controlPointImage, &desorient, &reorient); float3 temp=make_float3(reorient.m[0][0],reorient.m[0][1],reorient.m[0][2]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3))) temp=make_float3(reorient.m[1][0],reorient.m[1][1],reorient.m[1][2]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3))) temp=make_float3(reorient.m[2][0],reorient.m[2][1],reorient.m[2][2]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3))) // Bind some variables const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz); const int controlPointGridMem = controlPointNumber*sizeof(float4); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3))) NR_CUDA_SAFE_CALL(hipBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointGridMem)) // The Jacobian matrix is computed for every control point const unsigned int Grid_reg_bspline_getApproxJacobianValues = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_getApproxJacobianValues))); dim3 
G1(Grid_reg_bspline_getApproxJacobianValues,Grid_reg_bspline_getApproxJacobianValues,1); dim3 B1(Block_reg_bspline_getApproxJacobianValues,1,1); hipLaunchKernelGGL(( reg_bspline_getApproxJacobianValues_kernel), dim3(G1), dim3(B1), 0, 0, *jacobianMatrices_d, *jacobianDet_d); NR_CUDA_CHECK_KERNEL(G1,B1) NR_CUDA_SAFE_CALL(hipUnbindTexture(controlPointTexture)) } /* *************************************************************** */ void reg_bspline_ComputeJacobianValues(nifti_image *controlPointImage, nifti_image *referenceImage, float4 **controlPointImageArray_d, float **jacobianMatrices_d, float **jacobianDet_d) { // Need to reorient the Jacobian matrix using the header information - real to voxel conversion mat33 reorient, desorient; reg_getReorientationMatrix(controlPointImage, &desorient, &reorient); float3 temp=make_float3(reorient.m[0][0],reorient.m[0][1],reorient.m[0][2]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3))) temp=make_float3(reorient.m[1][0],reorient.m[1][1],reorient.m[1][2]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3))) temp=make_float3(reorient.m[2][0],reorient.m[2][1],reorient.m[2][2]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3))) // Bind some variables const int voxelNumber = referenceImage->nx*referenceImage->ny*referenceImage->nz; const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 referenceImageDim = make_int3(referenceImage->nx, referenceImage->ny, referenceImage->nz); const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz); const float3 controlPointVoxelSpacing = make_float3( controlPointImage->dx / referenceImage->dx, controlPointImage->dy / referenceImage->dy, controlPointImage->dz / referenceImage->dz); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3))) NR_CUDA_SAFE_CALL(hipBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointNumber*sizeof(float4))) // The Jacobian matrix is computed for every voxel const unsigned int Grid_reg_bspline_getJacobianValues = (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(Block_reg_bspline_getJacobianValues))); dim3 G1(Grid_reg_bspline_getJacobianValues,Grid_reg_bspline_getJacobianValues,1); dim3 B1(Block_reg_bspline_getJacobianValues,1,1); hipLaunchKernelGGL(( reg_bspline_getJacobianValues_kernel), dim3(G1), dim3(B1), 0, 0, *jacobianMatrices_d, *jacobianDet_d); NR_CUDA_CHECK_KERNEL(G1,B1) NR_CUDA_SAFE_CALL(hipUnbindTexture(controlPointTexture)) } /* *************************************************************** */ /* *************************************************************** */ double reg_bspline_ComputeJacobianPenaltyTerm_gpu(nifti_image *referenceImage, nifti_image *controlPointImage, float4 **controlPointImageArray_d, bool approx ) { // The Jacobian matrices and determinants are computed 
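// Depending on the "approx" flag, they are evaluated either at the control point
// positions only (approx==true) or at every voxel of the reference image. The value
// returned below is the sum of the squared-log Jacobian determinants (computed by
// reg_bspline_logSquaredValues_kernel) normalised by jacSum, so a penalty of zero
// corresponds to a transformation that preserves volume everywhere.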
    float *jacobianMatrices_d;
    float *jacobianDet_d;
    int jacNumber;
    double jacSum;
    if(approx){
        jacNumber=controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
        jacSum=(controlPointImage->nx-2)*(controlPointImage->ny-2)*(controlPointImage->nz-2);
        NR_CUDA_SAFE_CALL(hipMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
        NR_CUDA_SAFE_CALL(hipMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
        reg_bspline_ComputeApproxJacobianValues(controlPointImage, controlPointImageArray_d, &jacobianMatrices_d, &jacobianDet_d);
    }
    else{
        jacNumber=referenceImage->nx*referenceImage->ny*referenceImage->nz;
        jacSum=jacNumber;
        NR_CUDA_SAFE_CALL(hipMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
        NR_CUDA_SAFE_CALL(hipMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
        reg_bspline_ComputeJacobianValues(controlPointImage, referenceImage, controlPointImageArray_d, &jacobianMatrices_d, &jacobianDet_d);
    }
    NR_CUDA_SAFE_CALL(hipFree(jacobianMatrices_d))
    // The squared log of every Jacobian determinant is computed on the device
    NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&jacNumber,sizeof(int)))
    const unsigned int Grid_reg_bspline_logSquaredValues =
        (unsigned int)ceilf(sqrtf((float)jacNumber/(float)(Block_reg_bspline_logSquaredValues)));
    dim3 G1(Grid_reg_bspline_logSquaredValues,Grid_reg_bspline_logSquaredValues,1);
    dim3 B1(Block_reg_bspline_logSquaredValues,1,1);
    hipLaunchKernelGGL(( reg_bspline_logSquaredValues_kernel), dim3(G1), dim3(B1), 0, 0, jacobianDet_d);
    NR_CUDA_CHECK_KERNEL(G1,B1)
    // Transfer the values back to the CPU and accumulate them
    float *jacobianDet_h;
    NR_CUDA_SAFE_CALL(hipHostMalloc(&jacobianDet_h,jacNumber*sizeof(float)))
    NR_CUDA_SAFE_CALL(hipMemcpy(jacobianDet_h,jacobianDet_d, jacNumber*sizeof(float), hipMemcpyDeviceToHost))
    NR_CUDA_SAFE_CALL(hipFree(jacobianDet_d))
    double penaltyTermValue=0.;
    for(int i=0;i<jacNumber;++i) penaltyTermValue += jacobianDet_h[i];
    NR_CUDA_SAFE_CALL(hipHostFree(jacobianDet_h))
    return penaltyTermValue/jacSum;
}
/* *************************************************************** */
void reg_bspline_ComputeJacobianPenaltyTermGradient_gpu(nifti_image *referenceImage,
                                                        nifti_image *controlPointImage,
                                                        float4 **controlPointImageArray_d,
                                                        float4 **nodeNMIGradientArray_d,
                                                        float jacobianWeight,
                                                        bool approx)
{
    // The Jacobian matrices and determinants are computed
    float *jacobianMatrices_d;
    float *jacobianDet_d;
    int jacNumber;
    if(approx){
        jacNumber=controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
        NR_CUDA_SAFE_CALL(hipMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
        NR_CUDA_SAFE_CALL(hipMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
        reg_bspline_ComputeApproxJacobianValues(controlPointImage, controlPointImageArray_d, &jacobianMatrices_d, &jacobianDet_d);
    }
    else{
        jacNumber=referenceImage->nx*referenceImage->ny*referenceImage->nz;
        NR_CUDA_SAFE_CALL(hipMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
        NR_CUDA_SAFE_CALL(hipMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
        reg_bspline_ComputeJacobianValues(controlPointImage, referenceImage, controlPointImageArray_d, &jacobianMatrices_d, &jacobianDet_d);
    }
    // Need to desorient the Jacobian matrix using the header information - voxel to real conversion
    mat33 reorient, desorient;
    reg_getReorientationMatrix(controlPointImage, &desorient, &reorient);
    float3 temp=make_float3(desorient.m[0][0],desorient.m[0][1],desorient.m[0][2]);
    NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3)))
    temp=make_float3(desorient.m[1][0],desorient.m[1][1],desorient.m[1][2]);
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3))) temp=make_float3(desorient.m[2][0],desorient.m[2][1],desorient.m[2][2]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3))) NR_CUDA_SAFE_CALL(hipBindTexture(0,jacobianDeterminantTexture, jacobianDet_d, jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(hipBindTexture(0,jacobianMatricesTexture, jacobianMatrices_d, 9*jacNumber*sizeof(float))) // Bind some variables const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3))) if(approx){ float weight=jacobianWeight; weight = jacobianWeight * (float)(referenceImage->nx * referenceImage->ny * referenceImage->nz) / (float)( controlPointImage->nx*controlPointImage->ny*controlPointImage->nz); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_Weight,&weight,sizeof(float))) const unsigned int Grid_reg_bspline_computeApproxJacGradient = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_computeApproxJacGradient))); dim3 G1(Grid_reg_bspline_computeApproxJacGradient,Grid_reg_bspline_computeApproxJacGradient,1); dim3 B1(Block_reg_bspline_computeApproxJacGradient,1,1); hipLaunchKernelGGL(( reg_bspline_computeApproxJacGradient_kernel), dim3(G1), dim3(B1), 0, 0, *nodeNMIGradientArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } else{ const int voxelNumber = referenceImage->nx*referenceImage->ny*referenceImage->nz; const int3 referenceImageDim = make_int3(referenceImage->nx, referenceImage->ny, referenceImage->nz); const float3 controlPointVoxelSpacing = make_float3( controlPointImage->dx / referenceImage->dx, controlPointImage->dy / referenceImage->dy, controlPointImage->dz / referenceImage->dz); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_Weight,&jacobianWeight,sizeof(float))) const unsigned int Grid_reg_bspline_computeJacGradient = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_computeJacGradient))); dim3 G1(Grid_reg_bspline_computeJacGradient,Grid_reg_bspline_computeJacGradient,1); dim3 B1(Block_reg_bspline_computeJacGradient,1,1); hipLaunchKernelGGL(( reg_bspline_computeJacGradient_kernel), dim3(G1), dim3(B1), 0, 0, *nodeNMIGradientArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } NR_CUDA_SAFE_CALL(hipUnbindTexture(jacobianDeterminantTexture)) NR_CUDA_SAFE_CALL(hipUnbindTexture(jacobianMatricesTexture)) NR_CUDA_SAFE_CALL(hipFree(jacobianDet_d)) NR_CUDA_SAFE_CALL(hipFree(jacobianMatrices_d)) } /* *************************************************************** */ double reg_bspline_correctFolding_gpu(nifti_image *referenceImage, nifti_image *controlPointImage, float4 **controlPointImageArray_d, bool approx) { // The Jacobian matrices and determinants are computed float *jacobianMatrices_d; float *jacobianDet_d; int jacNumber; double 
jacSum; if(approx){ jacNumber=controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; jacSum = (controlPointImage->nx-2)*(controlPointImage->ny-2)*(controlPointImage->nz-2); NR_CUDA_SAFE_CALL(hipMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(hipMalloc(&jacobianDet_d,jacNumber*sizeof(float))) reg_bspline_ComputeApproxJacobianValues(controlPointImage, controlPointImageArray_d, &jacobianMatrices_d, &jacobianDet_d); } else{ jacSum=jacNumber=referenceImage->nx*referenceImage->ny*referenceImage->nz; NR_CUDA_SAFE_CALL(hipMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(hipMalloc(&jacobianDet_d,jacNumber*sizeof(float))) reg_bspline_ComputeJacobianValues(controlPointImage, referenceImage, controlPointImageArray_d, &jacobianMatrices_d, &jacobianDet_d); } // Check if the Jacobian determinant average NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&jacNumber,sizeof(int))) float *jacobianDet2_d; NR_CUDA_SAFE_CALL(hipMalloc(&jacobianDet2_d,jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(hipMemcpy(jacobianDet2_d,jacobianDet_d,jacNumber*sizeof(float),hipMemcpyDeviceToDevice)) const unsigned int Grid_reg_bspline_logSquaredValues = (unsigned int)ceilf(sqrtf((float)jacNumber/(float)(Block_reg_bspline_logSquaredValues))); dim3 G1(Grid_reg_bspline_logSquaredValues,Grid_reg_bspline_logSquaredValues,1); dim3 B1(Block_reg_bspline_logSquaredValues,1,1); hipLaunchKernelGGL(( reg_bspline_logSquaredValues_kernel), dim3(G1), dim3(B1), 0, 0, jacobianDet2_d); NR_CUDA_CHECK_KERNEL(G1,B1) float *jacobianDet_h; NR_CUDA_SAFE_CALL(hipHostMalloc(&jacobianDet_h,jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(hipMemcpy(jacobianDet_h,jacobianDet2_d, jacNumber*sizeof(float), hipMemcpyDeviceToHost)) NR_CUDA_SAFE_CALL(hipFree(jacobianDet2_d)) double penaltyTermValue=0.; for(int i=0;i<jacNumber;++i) penaltyTermValue += jacobianDet_h[i]; NR_CUDA_SAFE_CALL(hipHostFree(jacobianDet_h)) penaltyTermValue /= jacSum; if(penaltyTermValue==penaltyTermValue){ NR_CUDA_SAFE_CALL(hipFree(jacobianDet_d)) NR_CUDA_SAFE_CALL(hipFree(jacobianMatrices_d)) return penaltyTermValue; } // Need to desorient the Jacobian matrix using the header information - voxel to real conversion mat33 reorient, desorient; reg_getReorientationMatrix(controlPointImage, &desorient, &reorient); float3 temp=make_float3(desorient.m[0][0],desorient.m[0][1],desorient.m[0][2]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3))) temp=make_float3(desorient.m[1][0],desorient.m[1][1],desorient.m[1][2]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3))) temp=make_float3(desorient.m[2][0],desorient.m[2][1],desorient.m[2][2]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3))) NR_CUDA_SAFE_CALL(hipBindTexture(0,jacobianDeterminantTexture, jacobianDet_d, jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(hipBindTexture(0,jacobianMatricesTexture, jacobianMatrices_d, 9*jacNumber*sizeof(float))) // Bind some variables const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) 
NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3))) if(approx){ const unsigned int Grid_reg_bspline_approxCorrectFolding = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_approxCorrectFolding))); dim3 G1(Grid_reg_bspline_approxCorrectFolding,Grid_reg_bspline_approxCorrectFolding,1); dim3 B1(Block_reg_bspline_approxCorrectFolding,1,1); hipLaunchKernelGGL(( reg_bspline_approxCorrectFolding_kernel), dim3(G1), dim3(B1), 0, 0, *controlPointImageArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } else{ const int voxelNumber = referenceImage->nx*referenceImage->ny*referenceImage->nz; const int3 referenceImageDim = make_int3(referenceImage->nx, referenceImage->ny, referenceImage->nz); const float3 controlPointVoxelSpacing = make_float3( controlPointImage->dx / referenceImage->dx, controlPointImage->dy / referenceImage->dy, controlPointImage->dz / referenceImage->dz); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3))) const unsigned int Grid_reg_bspline_correctFolding = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_correctFolding))); dim3 G1(Grid_reg_bspline_correctFolding,Grid_reg_bspline_correctFolding,1); dim3 B1(Block_reg_bspline_correctFolding,1,1); hipLaunchKernelGGL(( reg_bspline_correctFolding_kernel), dim3(G1), dim3(B1), 0, 0, *controlPointImageArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } NR_CUDA_SAFE_CALL(hipUnbindTexture(jacobianDeterminantTexture)) NR_CUDA_SAFE_CALL(hipUnbindTexture(jacobianMatricesTexture)) NR_CUDA_SAFE_CALL(hipFree(jacobianDet_d)) NR_CUDA_SAFE_CALL(hipFree(jacobianMatrices_d)) return std::numeric_limits<double>::quiet_NaN(); } /* *************************************************************** */ /* *************************************************************** */ void reg_getDeformationFromDisplacement_gpu( nifti_image *image, float4 **imageArray_d) { // Bind the qform or sform mat44 temp_mat=image->qto_xyz; if(image->sform_code>0) temp_mat=image->sto_xyz; float4 temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0b,&temp,sizeof(float4))) temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1b,&temp,sizeof(float4))) temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2b,&temp,sizeof(float4))) const int voxelNumber=image->nx*image->ny*image->nz; NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) const int3 imageDim=make_int3(image->nx,image->ny,image->nz); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceImageDim,&imageDim,sizeof(int3))) const unsigned int Grid_reg_getDeformationFromDisplacement = (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(512))); dim3 G1(Grid_reg_getDeformationFromDisplacement,Grid_reg_getDeformationFromDisplacement,1); dim3 B1(512,1,1); hipLaunchKernelGGL(( reg_getDeformationFromDisplacement_kernel), dim3(G1), dim3(B1), 0, 0, *imageArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } /* *************************************************************** */ /* *************************************************************** */ void reg_getDisplacementFromDeformation_gpu( 
nifti_image *image, float4 **imageArray_d) { // Bind the qform or sform mat44 temp_mat=image->qto_xyz; if(image->sform_code>0) temp_mat=image->sto_xyz; float4 temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0b,&temp,sizeof(float4))) temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1b,&temp,sizeof(float4))) temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2b,&temp,sizeof(float4))) const int voxelNumber=image->nx*image->ny*image->nz; NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) const int3 imageDim=make_int3(image->nx,image->ny,image->nz); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceImageDim,&imageDim,sizeof(int3))) const unsigned int Grid_reg_getDisplacementFromDeformation = (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(512))); dim3 G1(Grid_reg_getDisplacementFromDeformation,Grid_reg_getDisplacementFromDeformation,1); dim3 B1(512,1,1); hipLaunchKernelGGL(( reg_getDisplacementFromDeformation_kernel), dim3(G1), dim3(B1), 0, 0, *imageArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } /* *************************************************************** */ /* *************************************************************** */ void reg_getDeformationFieldFromVelocityGrid_gpu(nifti_image *cpp_h, nifti_image *def_h, float4 **cpp_gpu, float4 **def_gpu, float4 **interDef_gpu, int **mask_gpu, int activeVoxel, bool approxComp) { if(approxComp){ fprintf(stderr, "[NiftyReg] reg_getDeformationFieldFromVelocityGrid_gpu\n"); fprintf(stderr, "[NiftyReg] ERROR Approximation not implemented yet on the GPU\n"); exit(1); } const int controlPointNumber = cpp_h->nx * cpp_h->ny * cpp_h->nz; const int voxelNumber = def_h->nx * def_h->ny * def_h->nz; if(voxelNumber != activeVoxel){ fprintf(stderr, "[NiftyReg] reg_getDeformationFieldFromVelocityGrid_gpu\n"); fprintf(stderr, "[NiftyReg] ERROR The mask must contains all voxel\n"); exit(1); } // A scaled down velocity field is first store float4 *scaledVelocityField_d=NULL; NR_CUDA_SAFE_CALL(hipMalloc(&scaledVelocityField_d,controlPointNumber*sizeof(float4))) NR_CUDA_SAFE_CALL(hipMemcpy(scaledVelocityField_d,*cpp_gpu,controlPointNumber*sizeof(float4),hipMemcpyDeviceToDevice)) reg_getDisplacementFromDeformation_gpu(cpp_h, &scaledVelocityField_d); reg_multiplyValue_gpu(controlPointNumber,&scaledVelocityField_d,1.f/cpp_h->pixdim[5]); reg_getDeformationFromDisplacement_gpu(cpp_h, &scaledVelocityField_d); if(!approxComp){ float4 *tempDef=NULL; float4 *currentDefPtr0=NULL; float4 *currentDefPtr1=NULL; if(interDef_gpu==NULL){ NR_CUDA_SAFE_CALL(hipMalloc(&tempDef,voxelNumber*sizeof(float4))) currentDefPtr0 = *def_gpu; currentDefPtr1 = tempDef; } else{ currentDefPtr0 = interDef_gpu[0]; currentDefPtr1 = interDef_gpu[1]; } reg_bspline_gpu(cpp_h, def_h, &scaledVelocityField_d, &currentDefPtr0, mask_gpu, activeVoxel, true); for(unsigned int i=0;i<cpp_h->pixdim[5];++i){ NR_CUDA_SAFE_CALL(hipMemcpy(currentDefPtr1,currentDefPtr0,voxelNumber*sizeof(float4),hipMemcpyDeviceToDevice)) if(interDef_gpu==NULL){ reg_defField_compose_gpu(def_h, &currentDefPtr1, &currentDefPtr0, mask_gpu, activeVoxel); } else{ reg_defField_compose_gpu(def_h, &currentDefPtr0, &currentDefPtr1, mask_gpu, activeVoxel); if(i==cpp_h->pixdim[5]-2){ currentDefPtr0 = interDef_gpu[i+1]; currentDefPtr1 = *def_gpu; } else 
    if(i<cpp_h->pixdim[5]-2){
        currentDefPtr0 = interDef_gpu[i+1];
        currentDefPtr1 = interDef_gpu[i+2];
    }
    }
    }
    if(tempDef!=NULL) NR_CUDA_SAFE_CALL(hipFree(tempDef));
    }
    NR_CUDA_SAFE_CALL(hipFree(scaledVelocityField_d))
}
/* *************************************************************** */
/* *************************************************************** */
void reg_getInverseDeformationFieldFromVelocityGrid_gpu(nifti_image *cpp_h,
                                                        nifti_image *def_h,
                                                        float4 **cpp_gpu,
                                                        float4 **def_gpu,
                                                        float4 **interDef_gpu,
                                                        int **mask_gpu,
                                                        int activeVoxel,
                                                        bool approxComp)
{
    const int controlPointNumber = cpp_h->nx * cpp_h->ny * cpp_h->nz;
    // The CPP file is first negated
    float4 *invertedCpp_gpu=NULL;
    NR_CUDA_SAFE_CALL(hipMalloc(&invertedCpp_gpu,controlPointNumber*sizeof(float4)))
    NR_CUDA_SAFE_CALL(hipMemcpy(invertedCpp_gpu,*cpp_gpu,controlPointNumber*sizeof(float4),hipMemcpyDeviceToDevice))
    reg_getDisplacementFromDeformation_gpu(cpp_h, &invertedCpp_gpu);
    reg_multiplyValue_gpu(controlPointNumber,&invertedCpp_gpu,-1.f);
    reg_getDeformationFromDisplacement_gpu(cpp_h, &invertedCpp_gpu);
    reg_getDeformationFieldFromVelocityGrid_gpu(cpp_h, def_h, &invertedCpp_gpu, def_gpu, interDef_gpu, mask_gpu, activeVoxel, approxComp);
    NR_CUDA_SAFE_CALL(hipFree(invertedCpp_gpu))
}
/* *************************************************************** */
/* *************************************************************** */
void reg_defField_compose_gpu(nifti_image *def,
                              float4 **def_gpu,
                              float4 **defOut_gpu,
                              int **mask_gpu,
                              int activeVoxel)
{
    const int voxelNumber=def->nx*def->ny*def->nz;
    if(voxelNumber != activeVoxel){
        fprintf(stderr, "[NiftyReg] reg_defField_compose_gpu\n");
        fprintf(stderr, "[NiftyReg] ERROR no mask can be used\n");
        exit(1);
    }
    // Bind the qform or sform
    mat44 temp_mat=def->qto_ijk;
    if(def->sform_code>0) temp_mat=def->sto_ijk;
    float4 temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]);
    NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0b,&temp,sizeof(float4)))
    temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]);
    NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1b,&temp,sizeof(float4)))
    temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]);
    NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2b,&temp,sizeof(float4)))
    temp_mat=def->qto_xyz;
    if(def->sform_code>0) temp_mat=def->sto_xyz;
    temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]);
    NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0c,&temp,sizeof(float4)))
    temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]);
    NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1c,&temp,sizeof(float4)))
    temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]);
    NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2c,&temp,sizeof(float4)))
    const int3 referenceImageDim=make_int3(def->nx,def->ny,def->nz);
    NR_CUDA_SAFE_CALL(hipBindTexture(0,voxelDisplacementTexture,*def_gpu,activeVoxel*sizeof(float4)))
    NR_CUDA_SAFE_CALL(hipBindTexture(0,maskTexture,*mask_gpu,activeVoxel*sizeof(int)))
    NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
    NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3)))
    const unsigned int Grid_reg_defField_compose =
        (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(Block_reg_defField_compose)));
    dim3 G1(Grid_reg_defField_compose,Grid_reg_defField_compose,1);
    dim3 B1(Block_reg_defField_compose,1,1);
    hipLaunchKernelGGL((
reg_defField_compose_kernel), dim3(G1), dim3(B1), 0, 0, *defOut_gpu); NR_CUDA_CHECK_KERNEL(G1,B1) NR_CUDA_SAFE_CALL(hipUnbindTexture(voxelDisplacementTexture)) NR_CUDA_SAFE_CALL(hipUnbindTexture(maskTexture)) } /* *************************************************************** */ /* *************************************************************** */ void reg_defField_getJacobianMatrix_gpu(nifti_image *deformationField, float4 **deformationField_gpu, float **jacobianMatrices_gpu) { const int3 referenceDim=make_int3(deformationField->nx,deformationField->ny,deformationField->nz); const float3 referenceSpacing=make_float3(deformationField->dx,deformationField->dy,deformationField->dz); const int voxelNumber = referenceDim.x*referenceDim.y*referenceDim.z; NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceImageDim,&referenceDim,sizeof(int3))) NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_ReferenceSpacing,&referenceSpacing,sizeof(float3))) mat33 reorient, desorient; reg_getReorientationMatrix(deformationField, &desorient, &reorient); float3 temp=make_float3(reorient.m[0][0],reorient.m[0][1],reorient.m[0][2]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3))) temp=make_float3(reorient.m[1][0],reorient.m[1][1],reorient.m[1][2]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3))) temp=make_float3(reorient.m[2][0],reorient.m[2][1],reorient.m[2][2]); NR_CUDA_SAFE_CALL(hipMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3))) NR_CUDA_SAFE_CALL(hipBindTexture(0,voxelDisplacementTexture,*deformationField_gpu,voxelNumber*sizeof(float4))) const unsigned int Grid_reg_defField_getJacobianMatrix = (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(Block_reg_defField_getJacobianMatrix))); dim3 G1(Grid_reg_defField_getJacobianMatrix,Grid_reg_defField_getJacobianMatrix,1); dim3 B1(Block_reg_defField_getJacobianMatrix); hipLaunchKernelGGL(( reg_defField_getJacobianMatrix_kernel), dim3(G1),dim3(B1), 0, 0, *jacobianMatrices_gpu); NR_CUDA_CHECK_KERNEL(G1,B1) NR_CUDA_SAFE_CALL(hipUnbindTexture(voxelDisplacementTexture)) } /* *************************************************************** */ /* *************************************************************** */ #endif
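/* *************************************************************** */
/* Illustrative host-side usage - a minimal sketch only, not part of
   the library. The image and buffer names used here (reference,
   controlPointGrid, controlPointGrid_d, nmiGradient_d) are assumptions
   made for this example; the function signatures are the ones defined
   above.

       double jacPenalty = reg_bspline_ComputeJacobianPenaltyTerm_gpu(
           reference, controlPointGrid, &controlPointGrid_d, true);
       float bePenalty = reg_bspline_ApproxBendingEnergy_gpu(
           controlPointGrid, &controlPointGrid_d);
       reg_bspline_ApproxBendingEnergyGradient_gpu(
           reference, controlPointGrid, &controlPointGrid_d,
           &nmiGradient_d, 0.01f);
*/
/* *************************************************************** */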
8d359a2f2a39953e1863cc5099373cfc5670dda1.cu
/* * _reg_bspline_gpu.cu * * * Created by Marc Modat on 24/03/2009. * Copyright (c) 2009, University College London. All rights reserved. * Centre for Medical Image Computing (CMIC) * See the LICENSE.txt file in the nifty_reg root folder * */ #ifndef _REG_BSPLINE_GPU_CU #define _REG_BSPLINE_GPU_CU #include "_reg_localTransformation_gpu.h" #include "_reg_localTransformation_kernels.cu" #include <sys/time.h> /* *************************************************************** */ /* *************************************************************** */ void reg_bspline_gpu(nifti_image *controlPointImage, nifti_image *reference, float4 **controlPointImageArray_d, float4 **positionFieldImageArray_d, int **mask_d, int activeVoxelNumber, bool bspline) { const int voxelNumber = reference->nx * reference->ny * reference->nz; const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 referenceImageDim = make_int3(reference->nx, reference->ny, reference->nz); const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const int useBSpline = bspline; const float3 controlPointVoxelSpacing = make_float3( controlPointImage->dx / reference->dx, controlPointImage->dy / reference->dy, controlPointImage->dz / reference->dz); struct timeval t1, t2; double elapsedTime; gettimeofday(&t1, NULL); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_UseBSpline,&useBSpline,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ActiveVoxelNumber,&activeVoxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaBindTexture(0, controlPointTexture, *controlPointImageArray_d, controlPointNumber*sizeof(float4))) NR_CUDA_SAFE_CALL(cudaBindTexture(0, maskTexture, *mask_d, activeVoxelNumber*sizeof(int))) const unsigned int Grid_reg_bspline_getDeformationField = (unsigned int)ceilf(sqrtf((float)activeVoxelNumber/(float)(Block_reg_bspline_getDeformationField))); dim3 G1(Grid_reg_bspline_getDeformationField,Grid_reg_bspline_getDeformationField,1); dim3 B1(Block_reg_bspline_getDeformationField,1,1); reg_bspline_getDeformationField <<< G1, B1 >>>(*positionFieldImageArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) NR_CUDA_SAFE_CALL(cudaUnbindTexture(controlPointTexture)) NR_CUDA_SAFE_CALL(cudaUnbindTexture(maskTexture)) gettimeofday(&t2, NULL); elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0; // sec to ms elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; printf("[NiftyReg F3D] reg_bspline_getDeformationField time =%f msec\n", elapsedTime); //printf("[NiftyReg F3D] reg_bspline_getDeformationField throughput =%f voxel per sec\n", (activeVoxelNumber*1000)/elapsedTime); return; } /* *************************************************************** */ /* *************************************************************** */ float reg_bspline_ApproxBendingEnergy_gpu(nifti_image *controlPointImage, float4 **controlPointImageArray_d) { const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const int controlPointGridMem = controlPointNumber*sizeof(float4); 
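    // The approximated bending energy is evaluated at the control point positions only:
    // a first kernel stores six second-derivative float4 values per control point in
    // secondDerivativeValues_d, a second kernel reduces them to one float per control
    // point in penaltyTerm_d, and the host sums those values and divides by
    // 3*controlPointNumber to average over the control points and the three components.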
NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointGridMem)) // First compute all the second derivatives float4 *secondDerivativeValues_d; NR_CUDA_SAFE_CALL(cudaMalloc(&secondDerivativeValues_d, 6*controlPointGridMem)) const unsigned int Grid_bspline_getApproxSecondDerivatives = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_getApproxSecondDerivatives))); dim3 G1(Grid_bspline_getApproxSecondDerivatives,Grid_bspline_getApproxSecondDerivatives,1); dim3 B1(Block_reg_bspline_getApproxSecondDerivatives,1,1); reg_bspline_getApproxSecondDerivatives <<< G1, B1 >>>(secondDerivativeValues_d); NR_CUDA_CHECK_KERNEL(G1,B1) NR_CUDA_SAFE_CALL(cudaUnbindTexture(controlPointTexture)) // Compute the bending energy from the second derivatives float *penaltyTerm_d; NR_CUDA_SAFE_CALL(cudaMalloc(&penaltyTerm_d, controlPointNumber*sizeof(float))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,secondDerivativesTexture, secondDerivativeValues_d, 6*controlPointGridMem)) const unsigned int Grid_reg_bspline_ApproxBendingEnergy = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_getApproxBendingEnergy))); dim3 G2(Grid_reg_bspline_ApproxBendingEnergy,Grid_reg_bspline_ApproxBendingEnergy,1); dim3 B2(Block_reg_bspline_getApproxBendingEnergy,1,1); reg_bspline_getApproxBendingEnergy_kernel <<< G2, B2 >>>(penaltyTerm_d); NR_CUDA_CHECK_KERNEL(G2,B2) NR_CUDA_SAFE_CALL(cudaUnbindTexture(secondDerivativesTexture)) NR_CUDA_SAFE_CALL(cudaFree(secondDerivativeValues_d)) // Transfert the vales back to the CPU and average them float *penaltyTerm_h; NR_CUDA_SAFE_CALL(cudaMallocHost(&penaltyTerm_h, controlPointNumber*sizeof(float))) NR_CUDA_SAFE_CALL(cudaMemcpy(penaltyTerm_h, penaltyTerm_d, controlPointNumber*sizeof(float), cudaMemcpyDeviceToHost)) NR_CUDA_SAFE_CALL(cudaFree(penaltyTerm_d)) double penaltyValue=0.0; for(int i=0;i<controlPointNumber;i++) penaltyValue += penaltyTerm_h[i]; NR_CUDA_SAFE_CALL(cudaFreeHost((void *)penaltyTerm_h)) return (float)(penaltyValue/(3.0*(double)controlPointNumber)); } /* *************************************************************** */ /* *************************************************************** */ void reg_bspline_ApproxBendingEnergyGradient_gpu(nifti_image *referenceImage, nifti_image *controlPointImage, float4 **controlPointImageArray_d, float4 **nodeNMIGradientArray_d, float bendingEnergyWeight) { const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const int controlPointGridMem = controlPointNumber*sizeof(float4); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointGridMem)) // First compute all the second derivatives float4 *secondDerivativeValues_d; NR_CUDA_SAFE_CALL(cudaMalloc(&secondDerivativeValues_d, 6*controlPointNumber*sizeof(float4))) const unsigned int Grid_bspline_getApproxSecondDerivatives = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_getApproxSecondDerivatives))); dim3 
G1(Grid_bspline_getApproxSecondDerivatives,Grid_bspline_getApproxSecondDerivatives,1); dim3 B1(Block_reg_bspline_getApproxSecondDerivatives,1,1); reg_bspline_getApproxSecondDerivatives <<< G1, B1 >>>(secondDerivativeValues_d); NR_CUDA_CHECK_KERNEL(G1,B1) NR_CUDA_SAFE_CALL(cudaUnbindTexture(controlPointTexture)) // Compute the gradient bendingEnergyWeight *= referenceImage->nx*referenceImage->ny*referenceImage->nz / (controlPointImage->nx*controlPointImage->ny*controlPointImage->nz); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_Weight,&bendingEnergyWeight,sizeof(float))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,secondDerivativesTexture, secondDerivativeValues_d, 6*controlPointNumber*sizeof(float4))) const unsigned int Grid_reg_bspline_getApproxBendingEnergyGradient = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_getApproxBendingEnergyGradient))); dim3 G2(Grid_reg_bspline_getApproxBendingEnergyGradient,Grid_reg_bspline_getApproxBendingEnergyGradient,1); dim3 B2(Block_reg_bspline_getApproxBendingEnergyGradient,1,1); reg_bspline_getApproxBendingEnergyGradient_kernel <<< G2, B2 >>>(*nodeNMIGradientArray_d); NR_CUDA_CHECK_KERNEL(G2,B2) NR_CUDA_SAFE_CALL(cudaUnbindTexture(secondDerivativesTexture)) NR_CUDA_SAFE_CALL(cudaFree(secondDerivativeValues_d)) return; } /* *************************************************************** */ /* *************************************************************** */ void reg_bspline_ComputeApproxJacobianValues(nifti_image *controlPointImage, float4 **controlPointImageArray_d, float **jacobianMatrices_d, float **jacobianDet_d) { // Need to reorient the Jacobian matrix using the header information - real to voxel conversion mat33 reorient, desorient; reg_getReorientationMatrix(controlPointImage, &desorient, &reorient); float3 temp=make_float3(reorient.m[0][0],reorient.m[0][1],reorient.m[0][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3))) temp=make_float3(reorient.m[1][0],reorient.m[1][1],reorient.m[1][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3))) temp=make_float3(reorient.m[2][0],reorient.m[2][1],reorient.m[2][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3))) // Bind some variables const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz); const int controlPointGridMem = controlPointNumber*sizeof(float4); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointGridMem)) // The Jacobian matrix is computed for every control point const unsigned int Grid_reg_bspline_getApproxJacobianValues = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_getApproxJacobianValues))); dim3 G1(Grid_reg_bspline_getApproxJacobianValues,Grid_reg_bspline_getApproxJacobianValues,1); dim3 B1(Block_reg_bspline_getApproxJacobianValues,1,1); reg_bspline_getApproxJacobianValues_kernel<<< G1, B1>>>(*jacobianMatrices_d, *jacobianDet_d); NR_CUDA_CHECK_KERNEL(G1,B1) 
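    // Each control point now has its 3x3 Jacobian matrix (nine floats) stored in
    // *jacobianMatrices_d and its determinant in *jacobianDet_d; these buffers are
    // consumed by the Jacobian penalty term and folding correction functions below.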
NR_CUDA_SAFE_CALL(cudaUnbindTexture(controlPointTexture)) } /* *************************************************************** */ void reg_bspline_ComputeJacobianValues(nifti_image *controlPointImage, nifti_image *referenceImage, float4 **controlPointImageArray_d, float **jacobianMatrices_d, float **jacobianDet_d) { // Need to reorient the Jacobian matrix using the header information - real to voxel conversion mat33 reorient, desorient; reg_getReorientationMatrix(controlPointImage, &desorient, &reorient); float3 temp=make_float3(reorient.m[0][0],reorient.m[0][1],reorient.m[0][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3))) temp=make_float3(reorient.m[1][0],reorient.m[1][1],reorient.m[1][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3))) temp=make_float3(reorient.m[2][0],reorient.m[2][1],reorient.m[2][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3))) // Bind some variables const int voxelNumber = referenceImage->nx*referenceImage->ny*referenceImage->nz; const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 referenceImageDim = make_int3(referenceImage->nx, referenceImage->ny, referenceImage->nz); const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz); const float3 controlPointVoxelSpacing = make_float3( controlPointImage->dx / referenceImage->dx, controlPointImage->dy / referenceImage->dy, controlPointImage->dz / referenceImage->dz); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,controlPointTexture, *controlPointImageArray_d, controlPointNumber*sizeof(float4))) // The Jacobian matrix is computed for every voxel const unsigned int Grid_reg_bspline_getJacobianValues = (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(Block_reg_bspline_getJacobianValues))); dim3 G1(Grid_reg_bspline_getJacobianValues,Grid_reg_bspline_getJacobianValues,1); dim3 B1(Block_reg_bspline_getJacobianValues,1,1); reg_bspline_getJacobianValues_kernel<<< G1, B1>>>(*jacobianMatrices_d, *jacobianDet_d); NR_CUDA_CHECK_KERNEL(G1,B1) NR_CUDA_SAFE_CALL(cudaUnbindTexture(controlPointTexture)) } /* *************************************************************** */ /* *************************************************************** */ double reg_bspline_ComputeJacobianPenaltyTerm_gpu(nifti_image *referenceImage, nifti_image *controlPointImage, float4 **controlPointImageArray_d, bool approx ) { // The Jacobian matrices and determinants are computed float *jacobianMatrices_d; float *jacobianDet_d; int jacNumber; double jacSum; if(approx){ jacNumber=controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; jacSum=(controlPointImage->nx-2)*(controlPointImage->ny-2)*(controlPointImage->nz-2); NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float))) 
NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float))) reg_bspline_ComputeApproxJacobianValues(controlPointImage, controlPointImageArray_d, &jacobianMatrices_d, &jacobianDet_d); } else{ jacNumber=referenceImage->nx*referenceImage->ny*referenceImage->nz; jacSum=jacNumber; NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float))) reg_bspline_ComputeJacobianValues(controlPointImage, referenceImage, controlPointImageArray_d, &jacobianMatrices_d, &jacobianDet_d); } NR_CUDA_SAFE_CALL(cudaFree(jacobianMatrices_d)) // The Jacobian determinant are squared and logged (might not be english but will do) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&jacNumber,sizeof(int))) const unsigned int Grid_reg_bspline_logSquaredValues = (unsigned int)ceilf(sqrtf((float)jacNumber/(float)(Block_reg_bspline_logSquaredValues))); dim3 G1(Grid_reg_bspline_logSquaredValues,Grid_reg_bspline_logSquaredValues,1); dim3 B1(Block_reg_bspline_logSquaredValues,1,1); reg_bspline_logSquaredValues_kernel<<< G1, B1>>>(jacobianDet_d); NR_CUDA_CHECK_KERNEL(G1,B1) // Transfert the data back to the CPU float *jacobianDet_h; NR_CUDA_SAFE_CALL(cudaMallocHost(&jacobianDet_h,jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(cudaMemcpy(jacobianDet_h,jacobianDet_d, jacNumber*sizeof(float), cudaMemcpyDeviceToHost)) NR_CUDA_SAFE_CALL(cudaFree(jacobianDet_d)) double penaltyTermValue=0.; for(int i=0;i<jacNumber;++i) penaltyTermValue += jacobianDet_h[i]; NR_CUDA_SAFE_CALL(cudaFreeHost(jacobianDet_h)) return penaltyTermValue/jacSum; } /* *************************************************************** */ void reg_bspline_ComputeJacobianPenaltyTermGradient_gpu(nifti_image *referenceImage, nifti_image *controlPointImage, float4 **controlPointImageArray_d, float4 **nodeNMIGradientArray_d, float jacobianWeight, bool approx) { // The Jacobian matrices and determinants are computed float *jacobianMatrices_d; float *jacobianDet_d; int jacNumber; if(approx){ jacNumber=controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float))) reg_bspline_ComputeApproxJacobianValues(controlPointImage, controlPointImageArray_d, &jacobianMatrices_d, &jacobianDet_d); } else{ jacNumber=referenceImage->nx*referenceImage->ny*referenceImage->nz; NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float))) reg_bspline_ComputeJacobianValues(controlPointImage, referenceImage, controlPointImageArray_d, &jacobianMatrices_d, &jacobianDet_d); } // Need to desorient the Jacobian matrix using the header information - voxel to real conversion mat33 reorient, desorient; reg_getReorientationMatrix(controlPointImage, &desorient, &reorient); float3 temp=make_float3(desorient.m[0][0],desorient.m[0][1],desorient.m[0][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3))) temp=make_float3(desorient.m[1][0],desorient.m[1][1],desorient.m[1][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3))) temp=make_float3(desorient.m[2][0],desorient.m[2][1],desorient.m[2][2]); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,jacobianDeterminantTexture, jacobianDet_d, jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(cudaBindTexture(0,jacobianMatricesTexture, 
jacobianMatrices_d, 9*jacNumber*sizeof(float))) // Bind some variables const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz); const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3))) if(approx){ float weight=jacobianWeight; weight = jacobianWeight * (float)(referenceImage->nx * referenceImage->ny * referenceImage->nz) / (float)( controlPointImage->nx*controlPointImage->ny*controlPointImage->nz); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_Weight,&weight,sizeof(float))) const unsigned int Grid_reg_bspline_computeApproxJacGradient = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_computeApproxJacGradient))); dim3 G1(Grid_reg_bspline_computeApproxJacGradient,Grid_reg_bspline_computeApproxJacGradient,1); dim3 B1(Block_reg_bspline_computeApproxJacGradient,1,1); reg_bspline_computeApproxJacGradient_kernel<<< G1, B1>>>(*nodeNMIGradientArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } else{ const int voxelNumber = referenceImage->nx*referenceImage->ny*referenceImage->nz; const int3 referenceImageDim = make_int3(referenceImage->nx, referenceImage->ny, referenceImage->nz); const float3 controlPointVoxelSpacing = make_float3( controlPointImage->dx / referenceImage->dx, controlPointImage->dy / referenceImage->dy, controlPointImage->dz / referenceImage->dz); NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3))) NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_Weight,&jacobianWeight,sizeof(float))) const unsigned int Grid_reg_bspline_computeJacGradient = (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_computeJacGradient))); dim3 G1(Grid_reg_bspline_computeJacGradient,Grid_reg_bspline_computeJacGradient,1); dim3 B1(Block_reg_bspline_computeJacGradient,1,1); reg_bspline_computeJacGradient_kernel<<< G1, B1>>>(*nodeNMIGradientArray_d); NR_CUDA_CHECK_KERNEL(G1,B1) } NR_CUDA_SAFE_CALL(cudaUnbindTexture(jacobianDeterminantTexture)) NR_CUDA_SAFE_CALL(cudaUnbindTexture(jacobianMatricesTexture)) NR_CUDA_SAFE_CALL(cudaFree(jacobianDet_d)) NR_CUDA_SAFE_CALL(cudaFree(jacobianMatrices_d)) } /* *************************************************************** */ double reg_bspline_correctFolding_gpu(nifti_image *referenceImage, nifti_image *controlPointImage, float4 **controlPointImageArray_d, bool approx) { // The Jacobian matrices and determinants are computed float *jacobianMatrices_d; float *jacobianDet_d; int jacNumber; double jacSum; if(approx){ jacNumber=controlPointImage->nx*controlPointImage->ny*controlPointImage->nz; jacSum = (controlPointImage->nx-2)*(controlPointImage->ny-2)*(controlPointImage->nz-2); NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float))) NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float))) reg_bspline_ComputeApproxJacobianValues(controlPointImage, controlPointImageArray_d, &jacobianMatrices_d, 
                                                 &jacobianDet_d);
    }
    else{
        jacSum=jacNumber=referenceImage->nx*referenceImage->ny*referenceImage->nz;
        NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianMatrices_d,9*jacNumber*sizeof(float)))
        NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet_d,jacNumber*sizeof(float)))
        reg_bspline_ComputeJacobianValues(controlPointImage,
                                          referenceImage,
                                          controlPointImageArray_d,
                                          &jacobianMatrices_d,
                                          &jacobianDet_d);
    }

    // Compute the Jacobian determinant based penalty term (squared log values)
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&jacNumber,sizeof(int)))
    float *jacobianDet2_d;
    NR_CUDA_SAFE_CALL(cudaMalloc(&jacobianDet2_d,jacNumber*sizeof(float)))
    NR_CUDA_SAFE_CALL(cudaMemcpy(jacobianDet2_d,jacobianDet_d,jacNumber*sizeof(float),cudaMemcpyDeviceToDevice))
    const unsigned int Grid_reg_bspline_logSquaredValues =
        (unsigned int)ceilf(sqrtf((float)jacNumber/(float)(Block_reg_bspline_logSquaredValues)));
    dim3 G1(Grid_reg_bspline_logSquaredValues,Grid_reg_bspline_logSquaredValues,1);
    dim3 B1(Block_reg_bspline_logSquaredValues,1,1);
    reg_bspline_logSquaredValues_kernel<<< G1, B1 >>>(jacobianDet2_d);
    NR_CUDA_CHECK_KERNEL(G1,B1)

    float *jacobianDet_h;
    NR_CUDA_SAFE_CALL(cudaMallocHost(&jacobianDet_h,jacNumber*sizeof(float)))
    NR_CUDA_SAFE_CALL(cudaMemcpy(jacobianDet_h,jacobianDet2_d,
                                 jacNumber*sizeof(float),
                                 cudaMemcpyDeviceToHost))
    NR_CUDA_SAFE_CALL(cudaFree(jacobianDet2_d))
    double penaltyTermValue=0.;
    for(int i=0;i<jacNumber;++i) penaltyTermValue += jacobianDet_h[i];
    NR_CUDA_SAFE_CALL(cudaFreeHost(jacobianDet_h))
    penaltyTermValue /= jacSum;
    if(penaltyTermValue==penaltyTermValue){
        // The penalty term is not NaN: no folding needs to be corrected
        NR_CUDA_SAFE_CALL(cudaFree(jacobianDet_d))
        NR_CUDA_SAFE_CALL(cudaFree(jacobianMatrices_d))
        return penaltyTermValue;
    }

    // Need to desorient the Jacobian matrix using the header information - voxel to real conversion
    mat33 reorient, desorient;
    reg_getReorientationMatrix(controlPointImage, &desorient, &reorient);
    float3 temp=make_float3(desorient.m[0][0],desorient.m[0][1],desorient.m[0][2]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3)))
    temp=make_float3(desorient.m[1][0],desorient.m[1][1],desorient.m[1][2]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3)))
    temp=make_float3(desorient.m[2][0],desorient.m[2][1],desorient.m[2][2]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3)))

    NR_CUDA_SAFE_CALL(cudaBindTexture(0,jacobianDeterminantTexture, jacobianDet_d, jacNumber*sizeof(float)))
    NR_CUDA_SAFE_CALL(cudaBindTexture(0,jacobianMatricesTexture, jacobianMatrices_d, 9*jacNumber*sizeof(float)))

    // Bind some variables
    const int controlPointNumber = controlPointImage->nx*controlPointImage->ny*controlPointImage->nz;
    const int3 controlPointImageDim = make_int3(controlPointImage->nx, controlPointImage->ny, controlPointImage->nz);
    const float3 controlPointSpacing = make_float3(controlPointImage->dx,controlPointImage->dy,controlPointImage->dz);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointNumber,&controlPointNumber,sizeof(int)))
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointImageDim,&controlPointImageDim,sizeof(int3)))
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointSpacing,&controlPointSpacing,sizeof(float3)))

    if(approx){
        const unsigned int Grid_reg_bspline_approxCorrectFolding =
            (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_approxCorrectFolding)));
        dim3 G1(Grid_reg_bspline_approxCorrectFolding,Grid_reg_bspline_approxCorrectFolding,1);
        dim3 B1(Block_reg_bspline_approxCorrectFolding,1,1);
        reg_bspline_approxCorrectFolding_kernel<<< G1, B1 >>>(*controlPointImageArray_d);
        NR_CUDA_CHECK_KERNEL(G1,B1)
    }
    else{
        const int voxelNumber = referenceImage->nx*referenceImage->ny*referenceImage->nz;
        const int3 referenceImageDim = make_int3(referenceImage->nx, referenceImage->ny, referenceImage->nz);
        const float3 controlPointVoxelSpacing = make_float3(
            controlPointImage->dx / referenceImage->dx,
            controlPointImage->dy / referenceImage->dy,
            controlPointImage->dz / referenceImage->dz);
        NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
        NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3)))
        NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ControlPointVoxelSpacing,&controlPointVoxelSpacing,sizeof(float3)))
        const unsigned int Grid_reg_bspline_correctFolding =
            (unsigned int)ceilf(sqrtf((float)controlPointNumber/(float)(Block_reg_bspline_correctFolding)));
        dim3 G1(Grid_reg_bspline_correctFolding,Grid_reg_bspline_correctFolding,1);
        dim3 B1(Block_reg_bspline_correctFolding,1,1);
        reg_bspline_correctFolding_kernel<<< G1, B1 >>>(*controlPointImageArray_d);
        NR_CUDA_CHECK_KERNEL(G1,B1)
    }
    NR_CUDA_SAFE_CALL(cudaUnbindTexture(jacobianDeterminantTexture))
    NR_CUDA_SAFE_CALL(cudaUnbindTexture(jacobianMatricesTexture))
    NR_CUDA_SAFE_CALL(cudaFree(jacobianDet_d))
    NR_CUDA_SAFE_CALL(cudaFree(jacobianMatrices_d))
    return std::numeric_limits<double>::quiet_NaN();
}
/* *************************************************************** */
/* *************************************************************** */
void reg_getDeformationFromDisplacement_gpu( nifti_image *image, float4 **imageArray_d)
{
    // Bind the qform or sform
    mat44 temp_mat=image->qto_xyz;
    if(image->sform_code>0) temp_mat=image->sto_xyz;
    float4 temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0b,&temp,sizeof(float4)))
    temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1b,&temp,sizeof(float4)))
    temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2b,&temp,sizeof(float4)))

    const int voxelNumber=image->nx*image->ny*image->nz;
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
    const int3 imageDim=make_int3(image->nx,image->ny,image->nz);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&imageDim,sizeof(int3)))

    const unsigned int Grid_reg_getDeformationFromDisplacement =
        (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(512)));
    dim3 G1(Grid_reg_getDeformationFromDisplacement,Grid_reg_getDeformationFromDisplacement,1);
    dim3 B1(512,1,1);
    reg_getDeformationFromDisplacement_kernel<<< G1, B1 >>>(*imageArray_d);
    NR_CUDA_CHECK_KERNEL(G1,B1)
}
/* *************************************************************** */
/* *************************************************************** */
void reg_getDisplacementFromDeformation_gpu( nifti_image *image, float4 **imageArray_d)
{
    // Bind the qform or sform
    mat44 temp_mat=image->qto_xyz;
    if(image->sform_code>0) temp_mat=image->sto_xyz;
    float4 temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0b,&temp,sizeof(float4)))
    temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1b,&temp,sizeof(float4)))
    temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2b,&temp,sizeof(float4)))

    const int voxelNumber=image->nx*image->ny*image->nz;
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
    const int3 imageDim=make_int3(image->nx,image->ny,image->nz);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&imageDim,sizeof(int3)))

    const unsigned int Grid_reg_getDisplacementFromDeformation =
        (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(512)));
    dim3 G1(Grid_reg_getDisplacementFromDeformation,Grid_reg_getDisplacementFromDeformation,1);
    dim3 B1(512,1,1);
    reg_getDisplacementFromDeformation_kernel<<< G1, B1 >>>(*imageArray_d);
    NR_CUDA_CHECK_KERNEL(G1,B1)
}
/* *************************************************************** */
/* *************************************************************** */
void reg_getDeformationFieldFromVelocityGrid_gpu(nifti_image *cpp_h,
                                                 nifti_image *def_h,
                                                 float4 **cpp_gpu,
                                                 float4 **def_gpu,
                                                 float4 **interDef_gpu,
                                                 int **mask_gpu,
                                                 int activeVoxel,
                                                 bool approxComp)
{
    if(approxComp){
        fprintf(stderr, "[NiftyReg] reg_getDeformationFieldFromVelocityGrid_gpu\n");
        fprintf(stderr, "[NiftyReg] ERROR Approximation not implemented yet on the GPU\n");
        exit(1);
    }

    const int controlPointNumber = cpp_h->nx * cpp_h->ny * cpp_h->nz;
    const int voxelNumber = def_h->nx * def_h->ny * def_h->nz;
    if(voxelNumber != activeVoxel){
        fprintf(stderr, "[NiftyReg] reg_getDeformationFieldFromVelocityGrid_gpu\n");
        fprintf(stderr, "[NiftyReg] ERROR The mask must contain all voxels\n");
        exit(1);
    }

    // A scaled-down velocity field is first stored
    float4 *scaledVelocityField_d=NULL;
    NR_CUDA_SAFE_CALL(cudaMalloc(&scaledVelocityField_d,controlPointNumber*sizeof(float4)))
    NR_CUDA_SAFE_CALL(cudaMemcpy(scaledVelocityField_d,*cpp_gpu,controlPointNumber*sizeof(float4),cudaMemcpyDeviceToDevice))
    reg_getDisplacementFromDeformation_gpu(cpp_h, &scaledVelocityField_d);
    reg_multiplyValue_gpu(controlPointNumber,&scaledVelocityField_d,1.f/cpp_h->pixdim[5]);
    reg_getDeformationFromDisplacement_gpu(cpp_h, &scaledVelocityField_d);

    if(!approxComp){
        float4 *tempDef=NULL;
        float4 *currentDefPtr0=NULL;
        float4 *currentDefPtr1=NULL;
        if(interDef_gpu==NULL){
            NR_CUDA_SAFE_CALL(cudaMalloc(&tempDef,voxelNumber*sizeof(float4)))
            currentDefPtr0 = *def_gpu;
            currentDefPtr1 = tempDef;
        }
        else{
            currentDefPtr0 = interDef_gpu[0];
            currentDefPtr1 = interDef_gpu[1];
        }
        reg_bspline_gpu(cpp_h,
                        def_h,
                        &scaledVelocityField_d,
                        &currentDefPtr0,
                        mask_gpu,
                        activeVoxel,
                        true);

        // The deformation field is successively composed with itself
        for(unsigned int i=0;i<cpp_h->pixdim[5];++i){
            NR_CUDA_SAFE_CALL(cudaMemcpy(currentDefPtr1,currentDefPtr0,voxelNumber*sizeof(float4),cudaMemcpyDeviceToDevice))
            if(interDef_gpu==NULL){
                reg_defField_compose_gpu(def_h,
                                         &currentDefPtr1,
                                         &currentDefPtr0,
                                         mask_gpu,
                                         activeVoxel);
            }
            else{
                reg_defField_compose_gpu(def_h,
                                         &currentDefPtr0,
                                         &currentDefPtr1,
                                         mask_gpu,
                                         activeVoxel);
                if(i==cpp_h->pixdim[5]-2){
                    currentDefPtr0 = interDef_gpu[i+1];
                    currentDefPtr1 = *def_gpu;
                }
                else if(i<cpp_h->pixdim[5]-2){
                    currentDefPtr0 = interDef_gpu[i+1];
                    currentDefPtr1 = interDef_gpu[i+2];
                }
            }
        }
        if(tempDef!=NULL) NR_CUDA_SAFE_CALL(cudaFree(tempDef));
    }
    NR_CUDA_SAFE_CALL(cudaFree(scaledVelocityField_d))
}
/* *************************************************************** */
/* *************************************************************** */
void reg_getInverseDeformationFieldFromVelocityGrid_gpu(nifti_image *cpp_h,
                                                        nifti_image *def_h,
                                                        float4 **cpp_gpu,
                                                        float4 **def_gpu,
                                                        float4 **interDef_gpu,
                                                        int **mask_gpu,
                                                        int activeVoxel,
                                                        bool approxComp)
{
    const int controlPointNumber = cpp_h->nx * cpp_h->ny * cpp_h->nz;

    // The CPP file is first negated
    float4 *invertedCpp_gpu=NULL;
    NR_CUDA_SAFE_CALL(cudaMalloc(&invertedCpp_gpu,controlPointNumber*sizeof(float4)))
    NR_CUDA_SAFE_CALL(cudaMemcpy(invertedCpp_gpu,*cpp_gpu,controlPointNumber*sizeof(float4),cudaMemcpyDeviceToDevice))
    reg_getDisplacementFromDeformation_gpu(cpp_h, &invertedCpp_gpu);
    reg_multiplyValue_gpu(controlPointNumber,&invertedCpp_gpu,-1.f);
    reg_getDeformationFromDisplacement_gpu(cpp_h, &invertedCpp_gpu);

    reg_getDeformationFieldFromVelocityGrid_gpu(cpp_h,
                                                def_h,
                                                &invertedCpp_gpu,
                                                def_gpu,
                                                interDef_gpu,
                                                mask_gpu,
                                                activeVoxel,
                                                approxComp);

    NR_CUDA_SAFE_CALL(cudaFree(invertedCpp_gpu))
}
/* *************************************************************** */
/* *************************************************************** */
void reg_defField_compose_gpu(nifti_image *def,
                              float4 **def_gpu,
                              float4 **defOut_gpu,
                              int **mask_gpu,
                              int activeVoxel)
{
    const int voxelNumber=def->nx*def->ny*def->nz;
    if(voxelNumber != activeVoxel){
        fprintf(stderr, "[NiftyReg] reg_defField_compose_gpu\n");
        fprintf(stderr, "[NiftyReg] ERROR no mask can be used\n");
        exit(1);
    }

    // Bind the qform or sform
    mat44 temp_mat=def->qto_ijk;
    if(def->sform_code>0) temp_mat=def->sto_ijk;
    float4 temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0b,&temp,sizeof(float4)))
    temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1b,&temp,sizeof(float4)))
    temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2b,&temp,sizeof(float4)))

    temp_mat=def->qto_xyz;
    if(def->sform_code>0) temp_mat=def->sto_xyz;
    temp=make_float4(temp_mat.m[0][0],temp_mat.m[0][1],temp_mat.m[0][2],temp_mat.m[0][3]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0c,&temp,sizeof(float4)))
    temp=make_float4(temp_mat.m[1][0],temp_mat.m[1][1],temp_mat.m[1][2],temp_mat.m[1][3]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1c,&temp,sizeof(float4)))
    temp=make_float4(temp_mat.m[2][0],temp_mat.m[2][1],temp_mat.m[2][2],temp_mat.m[2][3]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2c,&temp,sizeof(float4)))

    const int3 referenceImageDim=make_int3(def->nx,def->ny,def->nz);

    NR_CUDA_SAFE_CALL(cudaBindTexture(0,voxelDisplacementTexture,*def_gpu,activeVoxel*sizeof(float4)))
    NR_CUDA_SAFE_CALL(cudaBindTexture(0,maskTexture,*mask_gpu,activeVoxel*sizeof(int)))

    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceImageDim,sizeof(int3)))

    const unsigned int Grid_reg_defField_compose =
        (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(Block_reg_defField_compose)));
    dim3 G1(Grid_reg_defField_compose,Grid_reg_defField_compose,1);
    dim3 B1(Block_reg_defField_compose,1,1);
    reg_defField_compose_kernel<<< G1, B1 >>>(*defOut_gpu);
    NR_CUDA_CHECK_KERNEL(G1,B1)

    NR_CUDA_SAFE_CALL(cudaUnbindTexture(voxelDisplacementTexture))
    NR_CUDA_SAFE_CALL(cudaUnbindTexture(maskTexture))
}
/* *************************************************************** */
/* *************************************************************** */
void reg_defField_getJacobianMatrix_gpu(nifti_image *deformationField,
                                        float4 **deformationField_gpu,
                                        float **jacobianMatrices_gpu)
{
    const int3 referenceDim=make_int3(deformationField->nx,deformationField->ny,deformationField->nz);
    const float3 referenceSpacing=make_float3(deformationField->dx,deformationField->dy,deformationField->dz);
    const int voxelNumber = referenceDim.x*referenceDim.y*referenceDim.z;
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_VoxelNumber,&voxelNumber,sizeof(int)))
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceImageDim,&referenceDim,sizeof(int3)))
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_ReferenceSpacing,&referenceSpacing,sizeof(float3)))

    mat33 reorient, desorient;
    reg_getReorientationMatrix(deformationField, &desorient, &reorient);
    float3 temp=make_float3(reorient.m[0][0],reorient.m[0][1],reorient.m[0][2]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix0,&temp,sizeof(float3)))
    temp=make_float3(reorient.m[1][0],reorient.m[1][1],reorient.m[1][2]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix1,&temp,sizeof(float3)))
    temp=make_float3(reorient.m[2][0],reorient.m[2][1],reorient.m[2][2]);
    NR_CUDA_SAFE_CALL(cudaMemcpyToSymbol(c_AffineMatrix2,&temp,sizeof(float3)))

    NR_CUDA_SAFE_CALL(cudaBindTexture(0,voxelDisplacementTexture,*deformationField_gpu,voxelNumber*sizeof(float4)))

    const unsigned int Grid_reg_defField_getJacobianMatrix =
        (unsigned int)ceilf(sqrtf((float)voxelNumber/(float)(Block_reg_defField_getJacobianMatrix)));
    dim3 G1(Grid_reg_defField_getJacobianMatrix,Grid_reg_defField_getJacobianMatrix,1);
    dim3 B1(Block_reg_defField_getJacobianMatrix);
    reg_defField_getJacobianMatrix_kernel<<< G1, B1 >>>(*jacobianMatrices_gpu);
    NR_CUDA_CHECK_KERNEL(G1,B1)

    NR_CUDA_SAFE_CALL(cudaUnbindTexture(voxelDisplacementTexture))
}
/* *************************************************************** */
/* *************************************************************** */
#endif
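/* *************************************************************** */
/* Illustrative sketch (not part of the original NiftyReg source): the square
   2D-grid launch configuration that every launcher above recomputes inline,
   factored out for clarity. The helper name squareGridFor() is hypothetical;
   the kernels are assumed to rebuild a linear thread index from the 2D grid
   and to return early once it exceeds the number of elements to process
   (c_VoxelNumber or c_ControlPointNumber). */
static inline dim3 squareGridFor(const int elementNumber, const int blockSize)
{
    // Smallest g such that g * g * blockSize >= elementNumber
    const unsigned int g = (unsigned int)ceilf(sqrtf((float)elementNumber/(float)blockSize));
    return dim3(g,g,1);
}
/* Example:
       dim3 G1 = squareGridFor(voxelNumber, Block_reg_defField_compose);
       dim3 B1(Block_reg_defField_compose,1,1);
       reg_defField_compose_kernel<<< G1, B1 >>>(*defOut_gpu);                  */
/* *************************************************************** */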
8b6c96690a4942c790104d020d9e026232a6abfe.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 2.2.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date November 2016

       @generated from magmablas/zgetf2.cu, normal z -> d, Sun Nov 20 20:20:30 2016
*/
#include "magma_internal.h"

#define dger_bs 512  // 512 is max threads for 1.x cards

void magma_dgetf2_swap(
    magma_int_t n, double *x, magma_int_t i, magma_int_t j, magma_int_t incx,
    magma_queue_t queue );

void magma_dscal_dger(
    magma_int_t m, magma_int_t n, double *dA, magma_int_t ldda,
    magma_queue_t );


// TODO: this function could be in .cpp file -- it has no CUDA code in it.
/***************************************************************************//**
    DGETF2 computes an LU factorization of a general m-by-n matrix A
    using partial pivoting with row interchanges.

    The factorization has the form
        A = P * L * U
    where P is a permutation matrix, L is lower triangular with unit
    diagonal elements (lower trapezoidal if m > n), and U is upper
    triangular (upper trapezoidal if m < n).

    This is the right-looking Level 2 BLAS version of the algorithm.

    Arguments
    ---------
    @param[in]     m      INTEGER. The number of rows of the matrix A. M >= 0.
    @param[in]     n      INTEGER. The number of columns of the matrix A.
                          N >= 0 and N <= 1024. On CUDA architecture 1.x cards, N <= 512.
    @param[in,out] dA     DOUBLE PRECISION array, dimension (LDDA,N).
                          On entry, the m by n matrix to be factored. On exit, the
                          factors L and U from the factorization A = P*L*U; the unit
                          diagonal elements of L are not stored.
    @param[in]     ldda   INTEGER. The leading dimension of the array A. LDDA >= max(1,M).
    @param[out]    ipiv   INTEGER array, dimension (min(M,N)). The pivot indices;
                          for 1 <= i <= min(M,N), row i of the matrix was interchanged
                          with row IPIV(i).
    @param[in]     queue  magma_queue_t. Queue to execute in.
    @param[out]    info   INTEGER.
                          - = 0: successful exit
                          - < 0: if INFO = -k, the k-th argument had an illegal value
                          - > 0: if INFO = k, U(k,k) is exactly zero. The factorization
                                 has been completed, but the factor U is exactly singular,
                                 and division by zero will occur if it is used to solve
                                 a system of equations.

    @ingroup magma_getf2
*******************************************************************************/
extern "C" magma_int_t
magma_dgetf2_gpu(
    magma_int_t m, magma_int_t n,
    magmaDouble_ptr dA, magma_int_t ldda,
    magma_int_t *ipiv,
    magma_queue_t queue,
    magma_int_t *info )
{
    #define dA(i, j)  (dA + (i) + (j)*ldda)

    *info = 0;
    if (m < 0) {
        *info = -1;
    } else if (n < 0 || n > dger_bs) {
        *info = -2;
    } else if (ldda < max(1,m)) {
        *info = -4;
    }

    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        return *info;
    }

    // Quick return if possible
    if (m == 0 || n == 0) {
        return *info;
    }

    magma_int_t min_mn = min(m, n);
    magma_int_t j, jp;

    for (j=0; j < min_mn; j++) {
        hipDeviceSetCacheConfig( hipFuncCachePreferShared );

        // Find pivot and test for singularity.
        jp = j - 1 + magma_idamax( m-j, dA(j,j), 1, queue );
        ipiv[j] = jp + 1;  // ipiv uses Fortran one-based index
        // Can't check value of dA since it is on GPU
        //if ( dA(jp, j) != 0.0) {
        hipDeviceSetCacheConfig( hipFuncCachePreferL1 );

        // Apply the interchange to columns 1:N.
        if (jp != j) {
            magma_dgetf2_swap( n, dA, j, jp, ldda, queue );
        }

        // Compute elements J+1:M of J-th column.
        if (j < m) {
            magma_dscal_dger( m-j, n-j, dA(j, j), ldda, queue );
        }
        //}
        //else if (*info == 0) {
        //    *info = j;
        //}
    }

    return *info;
}


// ===========================================================================
// TODO: use standard BLAS magma_dswap?
#define dswap_bs 64

/******************************************************************************/
__global__
void kernel_dswap(int n, double *x, int i, int j, int incx)
{
    int id = blockIdx.x * dswap_bs + threadIdx.x;

    if (id < n) {
        double tmp = x[i + incx*id];
        x[i + incx*id] = x[j + incx*id];
        x[j + incx*id] = tmp;
    }
}


/******************************************************************************/
void magma_dgetf2_swap(
    magma_int_t n, double *x, magma_int_t i, magma_int_t j, magma_int_t incx,
    magma_queue_t queue )
{
    /* dswap two row vectors: ith and jth */
    dim3 threads( dswap_bs );
    dim3 grid( magma_ceildiv( n, dswap_bs ) );
    hipLaunchKernelGGL(kernel_dswap, dim3(grid), dim3(threads), 0, queue->cuda_stream(),
                       n, x, i, j, incx);
}


/******************************************************************************/
// dynamically allocated shared memory, set to size n when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ double shared_data[];

/******************************************************************************/
__global__
void kernel_dscal_dger(int m, int n, double *A, int lda)
{
    double *shared_y = shared_data;

    int tid = blockIdx.x * dger_bs + threadIdx.x;

    double reg = MAGMA_D_ZERO;

    if (threadIdx.x < n) {
        shared_y[threadIdx.x] = A[lda * threadIdx.x];
    }
    __syncthreads();

    if (tid < m && tid > 0) {
        reg = A[tid];
        reg *= MAGMA_D_DIV(MAGMA_D_ONE, shared_y[0]);
        A[tid] = reg;

        #pragma unroll
        for (int i=1; i < n; i++) {
            A[tid + i*lda] += (MAGMA_D_NEG_ONE) * shared_y[i] * reg;
        }
    }
}


/******************************************************************************/
void magma_dscal_dger(
    magma_int_t m, magma_int_t n,
    magmaDouble_ptr dA, magma_int_t ldda,
    magma_queue_t queue )
{
    /*
        Specialized kernel that merges dscal and dger:
        1) scale the first column vector A(1:M-1,0) with 1/A(0,0);
        2) perform a dger operation on the trailing matrix
           A(1:M-1,1:N-1) += alpha*x*y**T, where
           alpha := -1.0, x := A(1:M-1,0) and y := A(0,1:N-1).
    */
    dim3 threads( dger_bs );
    dim3 grid( magma_ceildiv( m, dger_bs ) );
    size_t shared_size = sizeof(double)*(n);
    hipLaunchKernelGGL(kernel_dscal_dger, dim3(grid), dim3(threads), shared_size, queue->cuda_stream(),
                       m, n, dA, ldda);
}
8b6c96690a4942c790104d020d9e026232a6abfe.cu
/*
    -- MAGMA (version 2.2.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date November 2016

       @generated from magmablas/zgetf2.cu, normal z -> d, Sun Nov 20 20:20:30 2016
*/
#include "magma_internal.h"

#define dger_bs 512  // 512 is max threads for 1.x cards

void magma_dgetf2_swap(
    magma_int_t n, double *x, magma_int_t i, magma_int_t j, magma_int_t incx,
    magma_queue_t queue );

void magma_dscal_dger(
    magma_int_t m, magma_int_t n, double *dA, magma_int_t ldda,
    magma_queue_t );


// TODO: this function could be in .cpp file -- it has no CUDA code in it.
/***************************************************************************//**
    DGETF2 computes an LU factorization of a general m-by-n matrix A
    using partial pivoting with row interchanges.

    The factorization has the form
        A = P * L * U
    where P is a permutation matrix, L is lower triangular with unit
    diagonal elements (lower trapezoidal if m > n), and U is upper
    triangular (upper trapezoidal if m < n).

    This is the right-looking Level 2 BLAS version of the algorithm.

    Arguments
    ---------
    @param[in]     m      INTEGER. The number of rows of the matrix A. M >= 0.
    @param[in]     n      INTEGER. The number of columns of the matrix A.
                          N >= 0 and N <= 1024. On CUDA architecture 1.x cards, N <= 512.
    @param[in,out] dA     DOUBLE PRECISION array, dimension (LDDA,N).
                          On entry, the m by n matrix to be factored. On exit, the
                          factors L and U from the factorization A = P*L*U; the unit
                          diagonal elements of L are not stored.
    @param[in]     ldda   INTEGER. The leading dimension of the array A. LDDA >= max(1,M).
    @param[out]    ipiv   INTEGER array, dimension (min(M,N)). The pivot indices;
                          for 1 <= i <= min(M,N), row i of the matrix was interchanged
                          with row IPIV(i).
    @param[in]     queue  magma_queue_t. Queue to execute in.
    @param[out]    info   INTEGER.
                          - = 0: successful exit
                          - < 0: if INFO = -k, the k-th argument had an illegal value
                          - > 0: if INFO = k, U(k,k) is exactly zero. The factorization
                                 has been completed, but the factor U is exactly singular,
                                 and division by zero will occur if it is used to solve
                                 a system of equations.

    @ingroup magma_getf2
*******************************************************************************/
extern "C" magma_int_t
magma_dgetf2_gpu(
    magma_int_t m, magma_int_t n,
    magmaDouble_ptr dA, magma_int_t ldda,
    magma_int_t *ipiv,
    magma_queue_t queue,
    magma_int_t *info )
{
    #define dA(i, j)  (dA + (i) + (j)*ldda)

    *info = 0;
    if (m < 0) {
        *info = -1;
    } else if (n < 0 || n > dger_bs) {
        *info = -2;
    } else if (ldda < max(1,m)) {
        *info = -4;
    }

    if (*info != 0) {
        magma_xerbla( __func__, -(*info) );
        return *info;
    }

    // Quick return if possible
    if (m == 0 || n == 0) {
        return *info;
    }

    magma_int_t min_mn = min(m, n);
    magma_int_t j, jp;

    for (j=0; j < min_mn; j++) {
        cudaDeviceSetCacheConfig( cudaFuncCachePreferShared );

        // Find pivot and test for singularity.
        jp = j - 1 + magma_idamax( m-j, dA(j,j), 1, queue );
        ipiv[j] = jp + 1;  // ipiv uses Fortran one-based index
        // Can't check value of dA since it is on GPU
        //if ( dA(jp, j) != 0.0) {
        cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 );

        // Apply the interchange to columns 1:N.
        if (jp != j) {
            magma_dgetf2_swap( n, dA, j, jp, ldda, queue );
        }

        // Compute elements J+1:M of J-th column.
        if (j < m) {
            magma_dscal_dger( m-j, n-j, dA(j, j), ldda, queue );
        }
        //}
        //else if (*info == 0) {
        //    *info = j;
        //}
    }

    return *info;
}


// ===========================================================================
// TODO: use standard BLAS magma_dswap?
#define dswap_bs 64

/******************************************************************************/
__global__
void kernel_dswap(int n, double *x, int i, int j, int incx)
{
    int id = blockIdx.x * dswap_bs + threadIdx.x;

    if (id < n) {
        double tmp = x[i + incx*id];
        x[i + incx*id] = x[j + incx*id];
        x[j + incx*id] = tmp;
    }
}


/******************************************************************************/
void magma_dgetf2_swap(
    magma_int_t n, double *x, magma_int_t i, magma_int_t j, magma_int_t incx,
    magma_queue_t queue )
{
    /* dswap two row vectors: ith and jth */
    dim3 threads( dswap_bs );
    dim3 grid( magma_ceildiv( n, dswap_bs ) );
    kernel_dswap <<< grid, threads, 0, queue->cuda_stream() >>>
        (n, x, i, j, incx);
}


/******************************************************************************/
// dynamically allocated shared memory, set to size n when the kernel is launched.
// See CUDA Guide B.2.3
extern __shared__ double shared_data[];

/******************************************************************************/
__global__
void kernel_dscal_dger(int m, int n, double *A, int lda)
{
    double *shared_y = shared_data;

    int tid = blockIdx.x * dger_bs + threadIdx.x;

    double reg = MAGMA_D_ZERO;

    if (threadIdx.x < n) {
        shared_y[threadIdx.x] = A[lda * threadIdx.x];
    }
    __syncthreads();

    if (tid < m && tid > 0) {
        reg = A[tid];
        reg *= MAGMA_D_DIV(MAGMA_D_ONE, shared_y[0]);
        A[tid] = reg;

        #pragma unroll
        for (int i=1; i < n; i++) {
            A[tid + i*lda] += (MAGMA_D_NEG_ONE) * shared_y[i] * reg;
        }
    }
}


/******************************************************************************/
void magma_dscal_dger(
    magma_int_t m, magma_int_t n,
    magmaDouble_ptr dA, magma_int_t ldda,
    magma_queue_t queue )
{
    /*
        Specialized kernel that merges dscal and dger:
        1) scale the first column vector A(1:M-1,0) with 1/A(0,0);
        2) perform a dger operation on the trailing matrix
           A(1:M-1,1:N-1) += alpha*x*y**T, where
           alpha := -1.0, x := A(1:M-1,0) and y := A(0,1:N-1).
    */
    dim3 threads( dger_bs );
    dim3 grid( magma_ceildiv( m, dger_bs ) );
    size_t shared_size = sizeof(double)*(n);
    kernel_dscal_dger <<< grid, threads, shared_size, queue->cuda_stream() >>>
        (m, n, dA, ldda);
}
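// ============================================================================
// Illustrative sketch (not part of the original MAGMA file): a minimal host-side
// driver for magma_dgetf2_gpu that factors an m-by-n column-major host panel hA
// (leading dimension lda, with n <= 512). The helper name example_dgetf2_panel
// is hypothetical; it assumes magma_init() has already been called and relies
// only on the usual MAGMA 2.x helpers (magma_queue_create, magma_dsetmatrix,
// ...), which are available through magma_internal.h included above.
extern "C" magma_int_t
example_dgetf2_panel(
    magma_int_t m, magma_int_t n,
    double *hA, magma_int_t lda,
    magma_int_t *ipiv )
{
    magma_int_t info = 0;
    magma_int_t ldda = magma_roundup( m, 32 );  // padded leading dimension on the GPU

    magma_queue_t queue;
    magma_queue_create( 0, &queue );            // queue on device 0

    magmaDouble_ptr dA;
    if (MAGMA_SUCCESS != magma_dmalloc( &dA, (size_t)ldda*n )) {
        magma_queue_destroy( queue );
        return MAGMA_ERR_DEVICE_ALLOC;
    }

    // Copy the panel to the device, factor it in place, and copy it back;
    // info > 0 reports an exactly zero pivot U(info,info), as documented above.
    magma_dsetmatrix( m, n, hA, lda, dA, ldda, queue );
    magma_dgetf2_gpu( m, n, dA, ldda, ipiv, queue, &info );
    magma_dgetmatrix( m, n, dA, ldda, hA, lda, queue );

    magma_free( dA );
    magma_queue_destroy( queue );
    return info;
}
// ============================================================================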
eb2f828bdb54e7d101d71e18ece3fb5af6fe43fc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// CUDA program to perform matrix addition of two large matrices

#include <stdio.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

// Number of threads in each dimension of the block.
#define THREAD_NUM 16

// CUDA kernel
__global__ void matrixAdd(float *A, float *B, float *C, int n)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int num = n;
    int i = row * num + col;

    if (row < num && col < num) {
        C[i] = A[i] + B[i];
    }
}

// Main
int main(void)
{
    // Error code to check return values for CUDA calls
    hipError_t err = hipSuccess;

    int num = 512;
    size_t size = num * num * sizeof(float);
    printf("\n\tMatrix addition of two %d * %d matrices\n\n", num, num);

    // Host matrices are stack-allocated variable-length arrays (~1 MB each);
    // fine at this size, but larger matrices would need heap allocation.
    float h_A[num][num], h_B[num][num], h_C[num][num];

    printf("Initializing host input vectors...\n");
    for (int i = 0; i < num; i++) {
        for (int j = 0; j < num; j++) {
            h_A[i][j] = rand();
            h_B[i][j] = rand();
        }
    }

    // Allocate device memory (with error checking)
    printf("Allocating device memory...\n");
    float *d_A = NULL;
    err = hipMalloc((void **)&d_A, size);
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    float *d_B = NULL;
    err = hipMalloc((void **)&d_B, size);
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    float *d_C = NULL;
    err = hipMalloc((void **)&d_C, size);
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy from host to device
    printf("Copying input from host to device...\n");
    err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Launch CUDA Kernel
    printf("Launching matrix addition kernel...\n");
    dim3 dimBlock(THREAD_NUM, THREAD_NUM, 1);
    dim3 dimGrid((int) ceil((float)num/dimBlock.x), (int) ceil((float)num/dimBlock.y), 1);
    hipLaunchKernelGGL(matrixAdd, dimGrid, dimBlock, 0, 0, d_A, d_B, d_C, num);

    err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to launch matrixAdd kernel (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy result from device to host
    printf("Copying result from device to host...\n");
    err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Free device global memory
    printf("Freeing device memory...\n");
    err = hipFree(d_A);
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = hipFree(d_B);
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = hipFree(d_C);
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    printf("Done.\n\n");
    return 0;
}
eb2f828bdb54e7d101d71e18ece3fb5af6fe43fc.cu
// CUDA program to perform matrix addition of two large matrices

#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Number of threads in each dimension of the block.
#define THREAD_NUM 16

// CUDA kernel
__global__ void matrixAdd(float *A, float *B, float *C, int n)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int num = n;
    int i = row * num + col;

    if (row < num && col < num) {
        C[i] = A[i] + B[i];
    }
}

// Main
int main(void)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    int num = 512;
    size_t size = num * num * sizeof(float);
    printf("\n\tMatrix addition of two %d * %d matrices\n\n", num, num);

    // Host matrices are stack-allocated variable-length arrays (~1 MB each);
    // fine at this size, but larger matrices would need heap allocation.
    float h_A[num][num], h_B[num][num], h_C[num][num];

    printf("Initializing host input vectors...\n");
    for (int i = 0; i < num; i++) {
        for (int j = 0; j < num; j++) {
            h_A[i][j] = rand();
            h_B[i][j] = rand();
        }
    }

    // Allocate device memory (with error checking)
    printf("Allocating device memory...\n");
    float *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    float *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    float *d_C = NULL;
    err = cudaMalloc((void **)&d_C, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy from host to device
    printf("Copying input from host to device...\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Launch CUDA Kernel
    printf("Launching matrix addition kernel...\n");
    dim3 dimBlock(THREAD_NUM, THREAD_NUM, 1);
    dim3 dimGrid((int) ceil((float)num/dimBlock.x), (int) ceil((float)num/dimBlock.y), 1);
    matrixAdd<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, num);

    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to launch matrixAdd kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy result from device to host
    printf("Copying result from device to host...\n");
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Free device global memory
    printf("Freeing device memory...\n");
    err = cudaFree(d_A);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaFree(d_B);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaFree(d_C);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    printf("Done.\n\n");
    return 0;
}
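// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): a host-side check that
// the device result matches a CPU reference, using the same row-major indexing
// as the kernel (element (row, col) lives at row * n + col). The helper name
// verifyMatrixAdd is hypothetical; it could be called right after the
// device-to-host copy as verifyMatrixAdd(&h_A[0][0], &h_B[0][0], &h_C[0][0], num).
static void verifyMatrixAdd(const float *A, const float *B, const float *C, int n)
{
    for (int i = 0; i < n * n; i++) {
        // Force the reference sum into single precision so it matches the kernel
        float expected = A[i] + B[i];
        if (expected != C[i]) {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }
    printf("Result verification passed.\n");
}
// ----------------------------------------------------------------------------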