hip_filename     stringlengths: 5 to 84
hip_content      stringlengths: 79 to 9.69M
cuda_filename    stringlengths: 4 to 83
cuda_content     stringlengths: 19 to 9.69M
76a4b331fcdd30cedb395c05e2b2b45530e9e83c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "back.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *h_out_d = NULL; hipMalloc(&h_out_d, XSIZE*YSIZE); double *weights_out_d = NULL; hipMalloc(&weights_out_d, XSIZE*YSIZE); double *weights_h_d = NULL; hipMalloc(&weights_h_d, XSIZE*YSIZE); double *weights_in_d = NULL; hipMalloc(&weights_in_d, XSIZE*YSIZE); double *outputs_d = NULL; hipMalloc(&outputs_d, XSIZE*YSIZE); double *deltas_h_d = NULL; hipMalloc(&deltas_h_d, XSIZE*YSIZE); double *deltas_h_new_d = NULL; hipMalloc(&deltas_h_new_d, XSIZE*YSIZE); double *deltas_o_d = NULL; hipMalloc(&deltas_o_d, XSIZE*YSIZE); double *weights_in_delta_d = NULL; hipMalloc(&weights_in_delta_d, XSIZE*YSIZE); double *weights_out_delta_d = NULL; hipMalloc(&weights_out_delta_d, XSIZE*YSIZE); double *weights_h_delta_d = NULL; hipMalloc(&weights_h_delta_d, XSIZE*YSIZE); int height = YSIZE; int inputs = 1; int outputs = 1; int layers = 1; double *training_in_d = NULL; hipMalloc(&training_in_d, XSIZE*YSIZE); double *training_out_d = NULL; hipMalloc(&training_out_d, XSIZE*YSIZE); int sample = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( back), dim3(gridBlock),dim3(threadBlock), 0, 0, h_out_d,weights_out_d,weights_h_d,weights_in_d,outputs_d,deltas_h_d,deltas_h_new_d,deltas_o_d,weights_in_delta_d,weights_out_delta_d,weights_h_delta_d,height,inputs,outputs,layers,training_in_d,training_out_d,sample); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( back), dim3(gridBlock),dim3(threadBlock), 0, 0, h_out_d,weights_out_d,weights_h_d,weights_in_d,outputs_d,deltas_h_d,deltas_h_new_d,deltas_o_d,weights_in_delta_d,weights_out_delta_d,weights_h_delta_d,height,inputs,outputs,layers,training_in_d,training_out_d,sample); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( back), dim3(gridBlock),dim3(threadBlock), 0, 0, h_out_d,weights_out_d,weights_h_d,weights_in_d,outputs_d,deltas_h_d,deltas_h_new_d,deltas_o_d,weights_in_delta_d,weights_out_delta_d,weights_h_delta_d,height,inputs,outputs,layers,training_in_d,training_out_d,sample); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
76a4b331fcdd30cedb395c05e2b2b45530e9e83c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "back.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *h_out_d = NULL; cudaMalloc(&h_out_d, XSIZE*YSIZE); double *weights_out_d = NULL; cudaMalloc(&weights_out_d, XSIZE*YSIZE); double *weights_h_d = NULL; cudaMalloc(&weights_h_d, XSIZE*YSIZE); double *weights_in_d = NULL; cudaMalloc(&weights_in_d, XSIZE*YSIZE); double *outputs_d = NULL; cudaMalloc(&outputs_d, XSIZE*YSIZE); double *deltas_h_d = NULL; cudaMalloc(&deltas_h_d, XSIZE*YSIZE); double *deltas_h_new_d = NULL; cudaMalloc(&deltas_h_new_d, XSIZE*YSIZE); double *deltas_o_d = NULL; cudaMalloc(&deltas_o_d, XSIZE*YSIZE); double *weights_in_delta_d = NULL; cudaMalloc(&weights_in_delta_d, XSIZE*YSIZE); double *weights_out_delta_d = NULL; cudaMalloc(&weights_out_delta_d, XSIZE*YSIZE); double *weights_h_delta_d = NULL; cudaMalloc(&weights_h_delta_d, XSIZE*YSIZE); int height = YSIZE; int inputs = 1; int outputs = 1; int layers = 1; double *training_in_d = NULL; cudaMalloc(&training_in_d, XSIZE*YSIZE); double *training_out_d = NULL; cudaMalloc(&training_out_d, XSIZE*YSIZE); int sample = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); back<<<gridBlock,threadBlock>>>(h_out_d,weights_out_d,weights_h_d,weights_in_d,outputs_d,deltas_h_d,deltas_h_new_d,deltas_o_d,weights_in_delta_d,weights_out_delta_d,weights_h_delta_d,height,inputs,outputs,layers,training_in_d,training_out_d,sample); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { back<<<gridBlock,threadBlock>>>(h_out_d,weights_out_d,weights_h_d,weights_in_d,outputs_d,deltas_h_d,deltas_h_new_d,deltas_o_d,weights_in_delta_d,weights_out_delta_d,weights_h_delta_d,height,inputs,outputs,layers,training_in_d,training_out_d,sample); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { back<<<gridBlock,threadBlock>>>(h_out_d,weights_out_d,weights_h_d,weights_in_d,outputs_d,deltas_h_d,deltas_h_new_d,deltas_o_d,weights_in_delta_d,weights_out_delta_d,weights_h_delta_d,height,inputs,outputs,layers,training_in_d,training_out_d,sample); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
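The pair above illustrates the mechanical rewrites hipify applies to this benchmark harness: CUDA runtime calls are renamed one-for-one (cudaSetDevice to hipSetDevice, cudaMalloc to hipMalloc, cudaDeviceSynchronize to hipDeviceSynchronize), curand_kernel.h becomes hiprand/hiprand_kernel.h, and the <<<grid, block>>> launch syntax is replaced by hipLaunchKernelGGL, which takes the kernel, grid, block, dynamic shared-memory size, and stream as explicit arguments. The sketch below is not taken from the dataset; the kernel name scale and the sizes are illustrative assumptions, and the hipify rewrite of each call is shown only as a trailing comment to mirror the pairing above.

// Minimal CUDA sketch of the launch pattern that hipify rewrites in the pair above (illustrative only).
#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale(float* data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;                  // trivial elementwise update
}

int main() {
    const int n = 1024;
    float* d = nullptr;
    cudaMalloc(&d, n * sizeof(float));             // hipify: hipMalloc(&d, n * sizeof(float));
    dim3 grid((n + 255) / 256), block(256);
    scale<<<grid, block>>>(d, 2.0f, n);            // hipify: hipLaunchKernelGGL(scale, grid, block, 0, 0, d, 2.0f, n);
    cudaDeviceSynchronize();                       // hipify: hipDeviceSynchronize();
    printf("launched %u blocks of %u threads\n", grid.x, block.x);
    cudaFree(d);                                   // hipify: hipFree(d);
    return 0;
}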
de998cd42a4cb28bec678bd78f91ba46da599586.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (C) 2016 Yusuke Suzuki <[email protected]> Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * This expermental software is provided AS IS. * Feel free to use/modify/distribute, * If used, please retain this disclaimer and cite * "GPUfs: Integrating a file system with GPUs", * M Silberstein,B Ford,I Keidar,E Witchel * ASPLOS13, March 2013, Houston,USA */ /* * This expermental software is provided AS IS. * Feel free to use/modify/distribute, * If used, please retain this disclaimer and cite * "GPUfs: Integrating a file system with GPUs", * M Silberstein,B Ford,I Keidar,E Witchel * ASPLOS13, March 2013, Houston,USA */ #include <errno.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <string.h> #include <assert.h> #include <sys/mman.h> #include<stdio.h> #include <unistd.h> #include <sys/types.h> #include "cpu_ipc.cu.h" #include "fs_constants.h" #include "fs_initializer.cu.h" #include "gpufs_con_lib.h" #include "host_loop.h" void fd2name(const int fd, char* name, int namelen){ char slink[100]; pid_t me=getpid(); name[0]=0; sprintf(slink,"/proc/%d/fd/0",me); int s=readlink(slink,name, namelen-1); if (s>=0) name[s]='\0'; } double transfer_time=0; bool debug_expecting_close=0; double total_stat=0; double total_stat1=0; void open_loop(volatile GPUGlobals* globals,int gpuid) { char* use_gpufs_lib=getenv("USE_GPUFS_DEVICE"); for (int i=0;i<FSTABLE_SIZE;i++) { char filename[FILENAME_SIZE]; volatile CPU_IPC_OPEN_Entry* e=&globals->cpu_ipcOpenQueue->entries[i]; // we are doing open if (e->status == CPU_IPC_PENDING && e->cpu_fd < 0 ) { double vvvv=_timestamp(); memcpy(filename,(char*)e->filename,FILENAME_SIZE); printf("[gpufs]: open:(%s)\n", filename); // OPEN if (e->flags&O_GWRONCE) { e->flags=O_RDWR|O_CREAT; } char pageflush=0; int cpu_fd=-1; struct stat s; if (e->do_not_open){ if ( stat(filename,&s) <0 ) { fprintf(stderr," problem with STAT file %s on CPU: %s\n",filename, strerror(errno));} // fprintf(stderr,"Do not open for inode %d, time %d\n",s.st_ino, s.st_mtime); }else{ if (use_gpufs_lib) cpu_fd=gpufs_file_open(globals->gpufs_fd,gpuid,filename,e->flags,S_IRUSR|S_IWUSR,&pageflush); else { cpu_fd=open(filename, e->flags,S_IRUSR|S_IWUSR);} if (cpu_fd < 0) { fprintf(stderr, "Problem with opening file %s on CPU: %s \n ",filename, strerror(errno)); } if 
(fstat(cpu_fd,&s)) { fprintf(stderr,"Problem with fstat the file %s on CPU: %s \n ",filename,strerror(errno));} } //fprintf(stderr, "FD %d, inode %ld, size %ld, Found file %s\n",i, s.st_ino, s.st_size, filename); e->cpu_fd=cpu_fd; e->flush_cache=pageflush; e->cpu_inode=s.st_ino; e->size=s.st_size; e->cpu_timestamp=s.st_ctime; __sync_synchronize(); e->status=CPU_IPC_READY; __sync_synchronize(); total_stat+=(_timestamp()-vvvv); } if (e->status == CPU_IPC_PENDING && e->cpu_fd>=0 ) { double vvvv1=_timestamp(); // do close // fprintf(stderr, "FD %d, closing file %s\n",i, e->filename); if (use_gpufs_lib) { if (e->is_dirty){ // if dirty, update gpufs device, but keep the file open e->cpu_fd=gpufs_file_close_stay_open(globals->gpufs_fd,gpuid,e->cpu_fd); }else{ e->cpu_fd=gpufs_file_close(globals->gpufs_fd,gpuid,e->cpu_fd); } gpufs_drop_residence(globals->gpufs_fd, gpuid, e->drop_residence_inode); }else{ if (!e->is_dirty){ e->cpu_fd=close(e->cpu_fd); } } __sync_synchronize(); e->status=CPU_IPC_READY; __sync_synchronize(); total_stat1+=_timestamp()-vvvv1; } } } Page diff_page; uchar* diff_and_merge(const Page* page, uint req_cpu_fd, size_t req_size, size_t req_file_offset){ struct stat s; if (fstat(req_cpu_fd,&s)) s.st_size=0; int data_read; if ( s.st_size<req_file_offset) { data_read=0; } else { data_read=pread(req_cpu_fd,&diff_page,req_size,req_file_offset); } //fprintf(stderr,"read %d, offset %d\n",data_read,req_file_offset); if (data_read<0) { perror("pread failed while diff\n"); req_size=(size_t)-1; } //if (data_read==0) fprintf(stderr,"empty read\n"); // uchar* tmp=(uchar*)page; // uchar* data_ptr=(uchar*)&diff_page; // if (data_read==0){ // data_ptr=tmp; // copy directly from the buffer // }else typedef char v32c __attribute__ ((vector_size (16))); uchar* data_ptr=(uchar*)page; v32c* A_v=(v32c*)data_ptr; v32c* B_v=(v32c*)&diff_page;; if (data_read>0){ // perform diff-ed write // for(int zzz=0;zzz<data_read;zzz++) // { // if (tmp[zzz]) { // ((uchar*)diff_page)[zzz]=tmp[zzz]; /// } // } int left=data_read%sizeof(v32c); for(int zzz=0;zzz<(data_read/sizeof(v32c)+(left!=0));zzz++) { // new is new OR old //data_ptr[zzz]=data_ptr[zzz]|((uchar*)diff_page)[zzz]; A_v[zzz]=A_v[zzz]|B_v[zzz]; } //memcpy(((char*)&diff_page)+data_read,tmp+data_read,req_size-data_read); } return data_ptr; } void async_close_loop(volatile GPUGlobals* globals) { async_close_rb_t* rb=globals->async_close_rb; char* no_files=getenv("GPU_NOFILE"); /* data must be read synchronously from GPU, but then __can be__ written asynchronously by CPU. -- TODO! 
The goal is to read as much as possible from GPU in order to make CPU close as fast as possible */ Page* page=globals->streamMgr->async_close_scratch; page_md_t md; while(rb->dequeue(page,&md, globals->streamMgr->async_close_stream)){ // drain the ringbuffer int res; if (md.last_page==1){ // that's the last fprintf(stderr,"closing dirty file %d\n",md.cpu_fd); res=close(md.cpu_fd); if (res<0) perror("Async close failed, and nobody to report to:\n"); }else{ if (!no_files){ //fprintf(stderr,"writing async close at offset: %d content; %d\n",md.file_offset,md.content_size); uchar* to_write; if (md.type == RW_IPC_DIFF ){ to_write= diff_and_merge(page, md.cpu_fd, md.content_size, md.file_offset); }else{ to_write=(uchar*)page; } int ws=pwrite(md.cpu_fd, to_write, md.content_size,md.file_offset); if (ws!=md.content_size){ perror("Writing while async close failed, and nobody to report to:\n"); } } } } } int max_req=0; int report=0; void rw_loop(volatile GPUGlobals* globals) { char* no_pci=getenv("GPU_NOPCI"); if (no_pci&&!report) fprintf(stderr,"Warning: no data will be transferred in and out of the GPU\n"); char* no_files=getenv("GPU_NOFILE"); if (no_files&&!report) fprintf(stderr,"Warning: no file reads/writes will be performed\n"); report=1; int cur_req=0; for (int i=0;i<RW_IPC_SIZE;i++) { volatile CPU_IPC_RW_Entry* e=&globals->cpu_ipcRWQueue->entries[i]; if(e->status == CPU_IPC_PENDING) { cur_req++; /* fprintf(stderr, "FD %d, cpu_fd %d, buf_offset %d, size " "%d, type %s, ret_val %d\n",i, e->cpu_fd, e->buffer_offset, e->size, e->type==RW_IPC_READ?"read":"write", e->return_value ); */ int req_cpu_fd =e->cpu_fd; size_t req_buffer_offset =e->buffer_offset; size_t req_file_offset =e->file_offset; size_t req_size =e->size; int req_type =e->type; assert(req_type == RW_IPC_READ || req_type == RW_IPC_WRITE || req_type == RW_IPC_DIFF || req_type == RW_IPC_TRUNC ); if (req_type!=RW_IPC_TRUNC){ assert(req_cpu_fd>=0 && req_size>0 ); } if(globals->streamMgr->task_array[i]!=-1) { // we only need to check the stream hipError_t cuda_status= hipStreamQuery(globals->streamMgr->memStream[i]); if ( hipErrorNotReady == cuda_status ) { // rush to the next request, this one is not ready continue; } if ( cuda_status != hipSuccess) { fprintf(stderr, "Error in the host loop.\n "); hipError_t error = hipDeviceSynchronize(); fprintf(stderr,"Device failed, CUDA error message is: %s\n\n", hipGetErrorString(error)); exit(-1); } // we are here only if success } printf("[gpufs] req:(%d)\n", req_type); switch(req_type) { case RW_IPC_READ: { // read int cpu_read_size=0; if (globals->streamMgr->task_array[i]==-1) // the request only started to be served { if(!no_files){ transfer_time-=_timestamp(); cpu_read_size=pread(req_cpu_fd, globals->streamMgr->scratch[i], req_size,req_file_offset); transfer_time+=_timestamp(); }else{ cpu_read_size=req_size; } char fname[256]; if (cpu_read_size < 0) { fd2name(req_cpu_fd,fname,256); fprintf(stderr, "Problem with reading file %s on CPU: %s \n ", fname, strerror(errno)); } // if (cpu_read_size != req_size ) { fprintf(stderr, "Read %d required %d on CPU\n ", cpu_read_size, req_size); } // if (cpu_read_size ==0 ) { fprintf(stderr,"Nothing has been read\n");} e->return_value=cpu_read_size; if (cpu_read_size > 0) { globals->streamMgr->task_array[i]=req_type; if (!no_pci){ CUDA_SAFE_CALL(hipMemcpyAsync(((char*)globals->rawStorage)+req_buffer_offset, globals->streamMgr->scratch[i], cpu_read_size,hipMemcpyHostToDevice,globals->streamMgr->memStream[i])); } } } // if read failed or we did not update 
cpu_read_size since we didn't take the previous if if (cpu_read_size <=0) { // complete the request globals->streamMgr->task_array[i]=-1; __sync_synchronize(); e->status=CPU_IPC_READY; __sync_synchronize(); } } break; case RW_IPC_TRUNC: e->return_value=ftruncate(req_cpu_fd,0); __sync_synchronize(); e->status=CPU_IPC_READY; __sync_synchronize(); break; case RW_IPC_DIFF: { if (globals->streamMgr->task_array[i]==-1) { globals->streamMgr->task_array[i]=req_type; // enqueue if (!no_pci){ // fprintf(stderr,"RW_IPC_DIFF buf_offset %llu, size %llu\n", req_buffer_offset, req_size); CUDA_SAFE_CALL(hipMemcpyAsync( globals->streamMgr->scratch[i], ((char*)globals->rawStorage)+req_buffer_offset, req_size,hipMemcpyDeviceToHost,globals->streamMgr->memStream[i])); } }else{ globals->streamMgr->task_array[i]=-1; // request completion if (!no_files){ uchar* to_write=diff_and_merge((Page*)globals->streamMgr->scratch[i],req_cpu_fd,req_size,req_file_offset); int res=pwrite(req_cpu_fd,to_write,req_size,req_file_offset); if (res!=req_size) { perror("pwrite failed on diff\n"); req_size=(size_t)-1; } }// end of no_files e->return_value=req_size; __sync_synchronize(); e->status=CPU_IPC_READY; __sync_synchronize(); } } break; case RW_IPC_WRITE: { if (globals->streamMgr->task_array[i]==-1) { if (!no_pci){ CUDA_SAFE_CALL(hipMemcpyAsync(globals->streamMgr->scratch[i], ((char*)globals->rawStorage)+req_buffer_offset, req_size,hipMemcpyDeviceToHost,globals->streamMgr->memStream[i])); } globals->streamMgr->task_array[i]=req_type; // enqueue }else{ globals->streamMgr->task_array[i]=-1; // compelte int cpu_write_size=req_size; if(!no_files){ cpu_write_size=pwrite(req_cpu_fd, globals->streamMgr->scratch[i], req_size,req_file_offset); } if (cpu_write_size < 0) { char fname[256]; fd2name(req_cpu_fd,fname,256); fprintf(stderr, "Problem with writing file %s on CPU: %s \n ",fname, strerror(errno)); } if (cpu_write_size != req_size ) { char fname[256]; fd2name(req_cpu_fd,fname,256); fprintf(stderr, "Wrote less than expected on CPU for file %s\n ",fname); } e->return_value=cpu_write_size; __sync_synchronize(); e->status=CPU_IPC_READY; __sync_synchronize(); } } break; default: assert(NULL); } } } if (max_req<cur_req) max_req=cur_req; } void logGPUfsDone() { fprintf(stderr,"kernel is complete\n"); fprintf(stderr,"Max pending requests: %d\n",max_req); fprintf(stderr,"Transfer time: %.3f\n",transfer_time); transfer_time=0; }
de998cd42a4cb28bec678bd78f91ba46da599586.cu
/* Copyright (C) 2016 Yusuke Suzuki <[email protected]> Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * This expermental software is provided AS IS. * Feel free to use/modify/distribute, * If used, please retain this disclaimer and cite * "GPUfs: Integrating a file system with GPUs", * M Silberstein,B Ford,I Keidar,E Witchel * ASPLOS13, March 2013, Houston,USA */ /* * This expermental software is provided AS IS. * Feel free to use/modify/distribute, * If used, please retain this disclaimer and cite * "GPUfs: Integrating a file system with GPUs", * M Silberstein,B Ford,I Keidar,E Witchel * ASPLOS13, March 2013, Houston,USA */ #include <errno.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <string.h> #include <assert.h> #include <sys/mman.h> #include<stdio.h> #include <unistd.h> #include <sys/types.h> #include "cpu_ipc.cu.h" #include "fs_constants.h" #include "fs_initializer.cu.h" #include "gpufs_con_lib.h" #include "host_loop.h" void fd2name(const int fd, char* name, int namelen){ char slink[100]; pid_t me=getpid(); name[0]=0; sprintf(slink,"/proc/%d/fd/0",me); int s=readlink(slink,name, namelen-1); if (s>=0) name[s]='\0'; } double transfer_time=0; bool debug_expecting_close=0; double total_stat=0; double total_stat1=0; void open_loop(volatile GPUGlobals* globals,int gpuid) { char* use_gpufs_lib=getenv("USE_GPUFS_DEVICE"); for (int i=0;i<FSTABLE_SIZE;i++) { char filename[FILENAME_SIZE]; volatile CPU_IPC_OPEN_Entry* e=&globals->cpu_ipcOpenQueue->entries[i]; // we are doing open if (e->status == CPU_IPC_PENDING && e->cpu_fd < 0 ) { double vvvv=_timestamp(); memcpy(filename,(char*)e->filename,FILENAME_SIZE); printf("[gpufs]: open:(%s)\n", filename); // OPEN if (e->flags&O_GWRONCE) { e->flags=O_RDWR|O_CREAT; } char pageflush=0; int cpu_fd=-1; struct stat s; if (e->do_not_open){ if ( stat(filename,&s) <0 ) { fprintf(stderr," problem with STAT file %s on CPU: %s\n",filename, strerror(errno));} // fprintf(stderr,"Do not open for inode %d, time %d\n",s.st_ino, s.st_mtime); }else{ if (use_gpufs_lib) cpu_fd=gpufs_file_open(globals->gpufs_fd,gpuid,filename,e->flags,S_IRUSR|S_IWUSR,&pageflush); else { cpu_fd=open(filename, e->flags,S_IRUSR|S_IWUSR);} if (cpu_fd < 0) { fprintf(stderr, "Problem with opening file %s on CPU: %s \n ",filename, strerror(errno)); } if (fstat(cpu_fd,&s)) { fprintf(stderr,"Problem with fstat the file 
%s on CPU: %s \n ",filename,strerror(errno));} } //fprintf(stderr, "FD %d, inode %ld, size %ld, Found file %s\n",i, s.st_ino, s.st_size, filename); e->cpu_fd=cpu_fd; e->flush_cache=pageflush; e->cpu_inode=s.st_ino; e->size=s.st_size; e->cpu_timestamp=s.st_ctime; __sync_synchronize(); e->status=CPU_IPC_READY; __sync_synchronize(); total_stat+=(_timestamp()-vvvv); } if (e->status == CPU_IPC_PENDING && e->cpu_fd>=0 ) { double vvvv1=_timestamp(); // do close // fprintf(stderr, "FD %d, closing file %s\n",i, e->filename); if (use_gpufs_lib) { if (e->is_dirty){ // if dirty, update gpufs device, but keep the file open e->cpu_fd=gpufs_file_close_stay_open(globals->gpufs_fd,gpuid,e->cpu_fd); }else{ e->cpu_fd=gpufs_file_close(globals->gpufs_fd,gpuid,e->cpu_fd); } gpufs_drop_residence(globals->gpufs_fd, gpuid, e->drop_residence_inode); }else{ if (!e->is_dirty){ e->cpu_fd=close(e->cpu_fd); } } __sync_synchronize(); e->status=CPU_IPC_READY; __sync_synchronize(); total_stat1+=_timestamp()-vvvv1; } } } Page diff_page; uchar* diff_and_merge(const Page* page, uint req_cpu_fd, size_t req_size, size_t req_file_offset){ struct stat s; if (fstat(req_cpu_fd,&s)) s.st_size=0; int data_read; if ( s.st_size<req_file_offset) { data_read=0; } else { data_read=pread(req_cpu_fd,&diff_page,req_size,req_file_offset); } //fprintf(stderr,"read %d, offset %d\n",data_read,req_file_offset); if (data_read<0) { perror("pread failed while diff\n"); req_size=(size_t)-1; } //if (data_read==0) fprintf(stderr,"empty read\n"); // uchar* tmp=(uchar*)page; // uchar* data_ptr=(uchar*)&diff_page; // if (data_read==0){ // data_ptr=tmp; // copy directly from the buffer // }else typedef char v32c __attribute__ ((vector_size (16))); uchar* data_ptr=(uchar*)page; v32c* A_v=(v32c*)data_ptr; v32c* B_v=(v32c*)&diff_page;; if (data_read>0){ // perform diff-ed write // for(int zzz=0;zzz<data_read;zzz++) // { // if (tmp[zzz]) { // ((uchar*)diff_page)[zzz]=tmp[zzz]; /// } // } int left=data_read%sizeof(v32c); for(int zzz=0;zzz<(data_read/sizeof(v32c)+(left!=0));zzz++) { // new is new OR old //data_ptr[zzz]=data_ptr[zzz]|((uchar*)diff_page)[zzz]; A_v[zzz]=A_v[zzz]|B_v[zzz]; } //memcpy(((char*)&diff_page)+data_read,tmp+data_read,req_size-data_read); } return data_ptr; } void async_close_loop(volatile GPUGlobals* globals) { async_close_rb_t* rb=globals->async_close_rb; char* no_files=getenv("GPU_NOFILE"); /* data must be read synchronously from GPU, but then __can be__ written asynchronously by CPU. -- TODO! 
The goal is to read as much as possible from GPU in order to make CPU close as fast as possible */ Page* page=globals->streamMgr->async_close_scratch; page_md_t md; while(rb->dequeue(page,&md, globals->streamMgr->async_close_stream)){ // drain the ringbuffer int res; if (md.last_page==1){ // that's the last fprintf(stderr,"closing dirty file %d\n",md.cpu_fd); res=close(md.cpu_fd); if (res<0) perror("Async close failed, and nobody to report to:\n"); }else{ if (!no_files){ //fprintf(stderr,"writing async close at offset: %d content; %d\n",md.file_offset,md.content_size); uchar* to_write; if (md.type == RW_IPC_DIFF ){ to_write= diff_and_merge(page, md.cpu_fd, md.content_size, md.file_offset); }else{ to_write=(uchar*)page; } int ws=pwrite(md.cpu_fd, to_write, md.content_size,md.file_offset); if (ws!=md.content_size){ perror("Writing while async close failed, and nobody to report to:\n"); } } } } } int max_req=0; int report=0; void rw_loop(volatile GPUGlobals* globals) { char* no_pci=getenv("GPU_NOPCI"); if (no_pci&&!report) fprintf(stderr,"Warning: no data will be transferred in and out of the GPU\n"); char* no_files=getenv("GPU_NOFILE"); if (no_files&&!report) fprintf(stderr,"Warning: no file reads/writes will be performed\n"); report=1; int cur_req=0; for (int i=0;i<RW_IPC_SIZE;i++) { volatile CPU_IPC_RW_Entry* e=&globals->cpu_ipcRWQueue->entries[i]; if(e->status == CPU_IPC_PENDING) { cur_req++; /* fprintf(stderr, "FD %d, cpu_fd %d, buf_offset %d, size " "%d, type %s, ret_val %d\n",i, e->cpu_fd, e->buffer_offset, e->size, e->type==RW_IPC_READ?"read":"write", e->return_value ); */ int req_cpu_fd =e->cpu_fd; size_t req_buffer_offset =e->buffer_offset; size_t req_file_offset =e->file_offset; size_t req_size =e->size; int req_type =e->type; assert(req_type == RW_IPC_READ || req_type == RW_IPC_WRITE || req_type == RW_IPC_DIFF || req_type == RW_IPC_TRUNC ); if (req_type!=RW_IPC_TRUNC){ assert(req_cpu_fd>=0 && req_size>0 ); } if(globals->streamMgr->task_array[i]!=-1) { // we only need to check the stream cudaError_t cuda_status= cudaStreamQuery(globals->streamMgr->memStream[i]); if ( cudaErrorNotReady == cuda_status ) { // rush to the next request, this one is not ready continue; } if ( cuda_status != cudaSuccess) { fprintf(stderr, "Error in the host loop.\n "); cudaError_t error = cudaDeviceSynchronize(); fprintf(stderr,"Device failed, CUDA error message is: %s\n\n", cudaGetErrorString(error)); exit(-1); } // we are here only if success } printf("[gpufs] req:(%d)\n", req_type); switch(req_type) { case RW_IPC_READ: { // read int cpu_read_size=0; if (globals->streamMgr->task_array[i]==-1) // the request only started to be served { if(!no_files){ transfer_time-=_timestamp(); cpu_read_size=pread(req_cpu_fd, globals->streamMgr->scratch[i], req_size,req_file_offset); transfer_time+=_timestamp(); }else{ cpu_read_size=req_size; } char fname[256]; if (cpu_read_size < 0) { fd2name(req_cpu_fd,fname,256); fprintf(stderr, "Problem with reading file %s on CPU: %s \n ", fname, strerror(errno)); } // if (cpu_read_size != req_size ) { fprintf(stderr, "Read %d required %d on CPU\n ", cpu_read_size, req_size); } // if (cpu_read_size ==0 ) { fprintf(stderr,"Nothing has been read\n");} e->return_value=cpu_read_size; if (cpu_read_size > 0) { globals->streamMgr->task_array[i]=req_type; if (!no_pci){ CUDA_SAFE_CALL(cudaMemcpyAsync(((char*)globals->rawStorage)+req_buffer_offset, globals->streamMgr->scratch[i], cpu_read_size,cudaMemcpyHostToDevice,globals->streamMgr->memStream[i])); } } } // if read failed or we did not 
update cpu_read_size since we didn't take the previous if if (cpu_read_size <=0) { // complete the request globals->streamMgr->task_array[i]=-1; __sync_synchronize(); e->status=CPU_IPC_READY; __sync_synchronize(); } } break; case RW_IPC_TRUNC: e->return_value=ftruncate(req_cpu_fd,0); __sync_synchronize(); e->status=CPU_IPC_READY; __sync_synchronize(); break; case RW_IPC_DIFF: { if (globals->streamMgr->task_array[i]==-1) { globals->streamMgr->task_array[i]=req_type; // enqueue if (!no_pci){ // fprintf(stderr,"RW_IPC_DIFF buf_offset %llu, size %llu\n", req_buffer_offset, req_size); CUDA_SAFE_CALL(cudaMemcpyAsync( globals->streamMgr->scratch[i], ((char*)globals->rawStorage)+req_buffer_offset, req_size,cudaMemcpyDeviceToHost,globals->streamMgr->memStream[i])); } }else{ globals->streamMgr->task_array[i]=-1; // request completion if (!no_files){ uchar* to_write=diff_and_merge((Page*)globals->streamMgr->scratch[i],req_cpu_fd,req_size,req_file_offset); int res=pwrite(req_cpu_fd,to_write,req_size,req_file_offset); if (res!=req_size) { perror("pwrite failed on diff\n"); req_size=(size_t)-1; } }// end of no_files e->return_value=req_size; __sync_synchronize(); e->status=CPU_IPC_READY; __sync_synchronize(); } } break; case RW_IPC_WRITE: { if (globals->streamMgr->task_array[i]==-1) { if (!no_pci){ CUDA_SAFE_CALL(cudaMemcpyAsync(globals->streamMgr->scratch[i], ((char*)globals->rawStorage)+req_buffer_offset, req_size,cudaMemcpyDeviceToHost,globals->streamMgr->memStream[i])); } globals->streamMgr->task_array[i]=req_type; // enqueue }else{ globals->streamMgr->task_array[i]=-1; // compelte int cpu_write_size=req_size; if(!no_files){ cpu_write_size=pwrite(req_cpu_fd, globals->streamMgr->scratch[i], req_size,req_file_offset); } if (cpu_write_size < 0) { char fname[256]; fd2name(req_cpu_fd,fname,256); fprintf(stderr, "Problem with writing file %s on CPU: %s \n ",fname, strerror(errno)); } if (cpu_write_size != req_size ) { char fname[256]; fd2name(req_cpu_fd,fname,256); fprintf(stderr, "Wrote less than expected on CPU for file %s\n ",fname); } e->return_value=cpu_write_size; __sync_synchronize(); e->status=CPU_IPC_READY; __sync_synchronize(); } } break; default: assert(NULL); } } } if (max_req<cur_req) max_req=cur_req; } void logGPUfsDone() { fprintf(stderr,"kernel is complete\n"); fprintf(stderr,"Max pending requests: %d\n",max_req); fprintf(stderr,"Transfer time: %.3f\n",transfer_time); transfer_time=0; }
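Both versions of the host loop above service GPU file-system requests without blocking: rw_loop enqueues each transfer with cudaMemcpyAsync (hipMemcpyAsync in the hipified file) on a per-request stream, and on the next pass cudaStreamQuery / hipStreamQuery either returns the not-ready status, so the loop moves on to the next request, or reports completion or failure. The sketch below is a minimal, self-contained illustration of that poll-a-stream pattern in plain CUDA; it is not taken from the dataset, the pinned buffer and its size are assumptions, and the busy-wait loop stands in for the request table that rw_loop walks between checks.

// Minimal sketch of the non-blocking stream-polling pattern used by rw_loop (illustrative only).
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    const size_t bytes = 1 << 20;
    char *h = nullptr, *d = nullptr;
    cudaMallocHost(&h, bytes);                 // pinned host memory so the copy can run asynchronously
    cudaMalloc(&d, bytes);
    cudaStream_t s;
    cudaStreamCreate(&s);

    cudaMemcpyAsync(d, h, bytes, cudaMemcpyHostToDevice, s);   // enqueue, do not wait

    // Non-blocking completion check, as in the host loop: cudaErrorNotReady means
    // "come back later"; anything other than cudaSuccess is a real failure.
    cudaError_t st;
    while ((st = cudaStreamQuery(s)) == cudaErrorNotReady) {
        /* a real host loop would service other pending requests here */
    }
    if (st != cudaSuccess)
        fprintf(stderr, "copy failed: %s\n", cudaGetErrorString(st));

    cudaStreamDestroy(s);
    cudaFree(d);
    cudaFreeHost(h);
    return 0;
}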
76c8f4322d65f48f4c85d7ba6c67e08797950f89.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device.hpp" //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Depth bilateral filter namespace kfusion { namespace device { __global__ void bilateral_kernel(const PtrStepSz<ushort> src, PtrStep<ushort> dst, const int ksz, const float sigma_spatial2_inv_half, const float sigma_depth2_inv_half) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= src.cols || y >= src.rows) return; int value = src(y, x); int tx = min (x - ksz / 2 + ksz, src.cols - 1); int ty = min (y - ksz / 2 + ksz, src.rows - 1); float sum1 = 0; float sum2 = 0; for (int cy = max (y - ksz / 2, 0); cy < ty; ++cy) { for (int cx = max (x - ksz / 2, 0); cx < tx; ++cx) { int depth = src(cy, cx); float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy); float color2 = (value - depth) * (value - depth); float weight = __expf (-(space2 * sigma_spatial2_inv_half + color2 * sigma_depth2_inv_half)); sum1 += depth * weight; sum2 += weight; } } dst(y, x) = __float2int_rn (sum1 / sum2); } } } void kfusion::device::bilateralFilter (const Depth& src, Depth& dst, int kernel_size, float sigma_spatial, float sigma_depth) { sigma_depth *= 1000; // meters -> mm dim3 block (32, 8); dim3 grid (divUp (src.cols (), block.x), divUp (src.rows (), block.y)); cudaSafeCall( hipFuncSetCacheConfig (bilateral_kernel, hipFuncCachePreferL1) ); hipLaunchKernelGGL(( bilateral_kernel), dim3(grid), dim3(block), 0, 0, src, dst, kernel_size, 0.5f / (sigma_spatial * sigma_spatial), 0.5f / (sigma_depth * sigma_depth)); cudaSafeCall ( hipGetLastError () ); }; //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Depth truncation namespace kfusion { namespace device { __global__ void truncate_depth_kernel(PtrStepSz<ushort> depth, ushort max_dist /*mm*/) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < depth.cols && y < depth.rows) if(depth(y, x) > max_dist) depth(y, x) = 0; } } } void kfusion::device::truncateDepth(Depth& depth, float max_dist /*meters*/) { dim3 block (32, 8); dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y)); hipLaunchKernelGGL(( truncate_depth_kernel), dim3(grid), dim3(block), 0, 0, depth, static_cast<ushort>(max_dist * 1000.f)); cudaSafeCall ( hipGetLastError() ); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Build depth pyramid namespace kfusion { namespace device { __global__ void pyramid_kernel(const PtrStepSz<ushort> src, PtrStepSz<ushort> dst, float sigma_depth_mult3) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= dst.cols || y >= dst.rows) return; const int D = 5; int center = src(2 * y, 2 * x); int tx = min (2 * x - D / 2 + D, src.cols - 1); int ty = min (2 * y - D / 2 + D, src.rows - 1); int cy = max (0, 2 * y - D / 2); int sum = 0; int count = 0; for (; cy < ty; ++cy) for (int cx = max (0, 2 * x - D / 2); cx < tx; ++cx) { int val = src(cy, cx); if (abs (val - center) < sigma_depth_mult3) { sum += val; ++count; } } dst(y, x) = (count == 0) ? 
0 : sum / count; } } } void kfusion::device::depthPyr(const Depth& source, Depth& pyramid, float sigma_depth) { sigma_depth *= 1000; // meters -> mm dim3 block (32, 8); dim3 grid (divUp(pyramid.cols(), block.x), divUp(pyramid.rows(), block.y)); hipLaunchKernelGGL(( pyramid_kernel), dim3(grid), dim3(block), 0, 0, source, pyramid, sigma_depth * 3); cudaSafeCall ( hipGetLastError () ); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Compute normals namespace kfusion { namespace device { __global__ void compute_normals_kernel(const PtrStepSz<ushort> depth, const Reprojector reproj, PtrStep<Normal> normals) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= depth.cols || y >= depth.rows) return; const float qnan = numeric_limits<float>::quiet_NaN (); Normal n_out = make_float4(qnan, qnan, qnan, 0.f); if (x < depth.cols - 1 && y < depth.rows - 1) { //mm -> meters float z00 = depth(y, x) * 0.001f; float z01 = depth(y, x+1) * 0.001f; float z10 = depth(y+1, x) * 0.001f; if (z00 * z01 * z10 != 0) { float3 v00 = reproj(x, y, z00); float3 v01 = reproj(x+1, y, z01); float3 v10 = reproj(x, y+1, z10); float3 n = normalized( cross (v01 - v00, v10 - v00) ); n_out = make_float4(-n.x, -n.y, -n.z, 0.f); } } normals(y, x) = n_out; } __global__ void mask_depth_kernel(const PtrStep<Normal> normals, PtrStepSz<ushort> depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < depth.cols || y < depth.rows) { float4 n = normals(y, x); if (isnan(n.x)) depth(y, x) = 0; } } } } void kfusion::device::computeNormalsAndMaskDepth(const Reprojector& reproj, Depth& depth, Normals& normals) { dim3 block (32, 8); dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y)); hipLaunchKernelGGL(( compute_normals_kernel), dim3(grid), dim3(block), 0, 0, depth, reproj, normals); cudaSafeCall ( hipGetLastError () ); hipLaunchKernelGGL(( mask_depth_kernel), dim3(grid), dim3(block), 0, 0, normals, depth); cudaSafeCall ( hipGetLastError () ); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Compute computePointNormals namespace kfusion { namespace device { __global__ void points_normals_kernel(const Reprojector reproj, const PtrStepSz<ushort> depth, PtrStep<Point> points, PtrStep<Normal> normals) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= depth.cols || y >= depth.rows) return; const float qnan = numeric_limits<float>::quiet_NaN (); points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan); if (x >= depth.cols - 1 || y >= depth.rows - 1) return; //mm -> meters float z00 = depth(y, x) * 0.001f; float z01 = depth(y, x+1) * 0.001f; float z10 = depth(y+1, x) * 0.001f; if (z00 * z01 * z10 != 0) { float3 v00 = reproj(x, y, z00); float3 v01 = reproj(x+1, y, z01); float3 v10 = reproj(x, y+1, z10); float3 n = normalized( cross (v01 - v00, v10 - v00) ); normals(y, x) = make_float4(-n.x, -n.y, -n.z, 0.f); points(y, x) = make_float4(v00.x, v00.y, v00.z, 0.f); } } } } void kfusion::device::computePointNormals(const Reprojector& reproj, const Depth& depth, Points& points, Normals& normals) { dim3 block (32, 8); dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y)); hipLaunchKernelGGL(( points_normals_kernel), dim3(grid), dim3(block), 0, 0, reproj, depth, points, normals); cudaSafeCall ( hipGetLastError 
() ); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Compute dists namespace kfusion { namespace device { __global__ void compute_dists_kernel(const PtrStepSz<ushort> depth, Dists dists, float2 finv, float2 c) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < depth.cols || y < depth.rows) { float xl = (x - c.x) * finv.x; float yl = (y - c.y) * finv.y; float lambda = sqrtf (xl * xl + yl * yl + 1); dists(y, x) = __float2half_rn(depth(y, x) * lambda * 0.001f); //meters } } } } void kfusion::device::compute_dists(const Depth& depth, Dists dists, float2 f, float2 c) { dim3 block (32, 8); dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y)); hipLaunchKernelGGL(( compute_dists_kernel), dim3(grid), dim3(block), 0, 0, depth, dists, make_float2(1.f/f.x, 1.f/f.y), c); cudaSafeCall ( hipGetLastError () ); } namespace kfusion { namespace device { __global__ void resize_depth_normals_kernel(const PtrStep<ushort> dsrc, const PtrStep<float4> nsrc, PtrStepSz<ushort> ddst, PtrStep<float4> ndst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= ddst.cols || y >= ddst.rows) return; const float qnan = numeric_limits<float>::quiet_NaN (); ushort d = 0; float4 n = make_float4(qnan, qnan, qnan, qnan); int xs = x * 2; int ys = y * 2; int d00 = dsrc(ys+0, xs+0); int d01 = dsrc(ys+0, xs+1); int d10 = dsrc(ys+1, xs+0); int d11 = dsrc(ys+1, xs+1); if (d00 * d01 != 0 && d10 * d11 != 0) { d = (d00 + d01 + d10 + d11)/4; float4 n00 = nsrc(ys+0, xs+0); float4 n01 = nsrc(ys+0, xs+1); float4 n10 = nsrc(ys+1, xs+0); float4 n11 = nsrc(ys+1, xs+1); n.x = (n00.x + n01.x + n10.x + n11.x)*0.25; n.y = (n00.y + n01.y + n10.y + n11.y)*0.25; n.z = (n00.z + n01.z + n10.z + n11.z)*0.25; } ddst(y, x) = d; ndst(y, x) = n; } } } void kfusion::device::resizeDepthNormals(const Depth& depth, const Normals& normals, Depth& depth_out, Normals& normals_out) { int in_cols = depth.cols (); int in_rows = depth.rows (); int out_cols = in_cols / 2; int out_rows = in_rows / 2; dim3 block (32, 8); dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y)); hipLaunchKernelGGL(( resize_depth_normals_kernel), dim3(grid), dim3(block), 0, 0, depth, normals, depth_out, normals_out); cudaSafeCall ( hipGetLastError () ); } namespace kfusion { namespace device { __global__ void resize_points_normals_kernel(const PtrStep<Point> vsrc, const PtrStep<Normal> nsrc, PtrStepSz<Point> vdst, PtrStep<Normal> ndst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= vdst.cols || y >= vdst.rows) return; const float qnan = numeric_limits<float>::quiet_NaN (); vdst(y, x) = ndst(y, x) = make_float4(qnan, qnan, qnan, 0.f); int xs = x * 2; int ys = y * 2; float3 d00 = tr(vsrc(ys+0, xs+0)); float3 d01 = tr(vsrc(ys+0, xs+1)); float3 d10 = tr(vsrc(ys+1, xs+0)); float3 d11 = tr(vsrc(ys+1, xs+1)); if (!isnan(d00.x * d01.x * d10.x * d11.x)) { float3 d = (d00 + d01 + d10 + d11) * 0.25f; vdst(y, x) = make_float4(d.x, d.y, d.z, 0.f); float3 n00 = tr(nsrc(ys+0, xs+0)); float3 n01 = tr(nsrc(ys+0, xs+1)); float3 n10 = tr(nsrc(ys+1, xs+0)); float3 n11 = tr(nsrc(ys+1, xs+1)); float3 n = (n00 + n01 + n10 + n11)*0.25f; ndst(y, x) = make_float4(n.x, n.y, n.z, 0.f); } } } } void kfusion::device::resizePointsNormals(const Points& points, const Normals& normals, Points& points_out, Normals& normals_out) { int out_cols = points.cols () / 2; 
int out_rows = points.rows () / 2; dim3 block (32, 8); dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y)); hipLaunchKernelGGL(( resize_points_normals_kernel), dim3(grid), dim3(block), 0, 0, points, normals, points_out, normals_out); cudaSafeCall ( hipGetLastError () ); } namespace kfusion { namespace device { __global__ void render_image_kernel(const PtrStep<ushort> depth, const PtrStep<Normal> normals, const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= dst.cols || y >= dst.rows) return; float3 color; int d = depth(y,x); if (d == 0) { const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f); const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f); float w = static_cast<float>(y) / dst.rows; color = bgr1 * (1 - w) + bgr2 * w; } else { float3 P = reproj(x, y, d * 0.001f); float3 N = tr(normals(y,x)); const float Ka = 0.3f; //ambient coeff const float Kd = 0.5f; //diffuse coeff const float Ks = 0.2f; //specular coeff const float n = 20.f; //specular power const float Ax = 1.f; //ambient color, can be RGB const float Dx = 1.f; //diffuse color, can be RGB const float Sx = 1.f; //specular color, can be RGB const float Lx = 1.f; //light color //Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n] float3 L = normalized(light_pose - P); float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P); float3 R = normalized(2 * N * dot(N, L) - L); float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n); color = make_float3(Ix, Ix, Ix); } uchar4 out; out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f); out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f); out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f); out.w = 0; dst(y, x) = out; } __global__ void render_image_kernel(const PtrStep<Point> points, const PtrStep<Normal> normals, const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= dst.cols || y >= dst.rows) return; float3 color; float3 p = tr(points(y,x)); if (isnan(p.x)) { const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f); const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f); float w = static_cast<float>(y) / dst.rows; color = bgr1 * (1 - w) + bgr2 * w; } else { float3 P = p; float3 N = tr(normals(y,x)); const float Ka = 0.3f; //ambient coeff const float Kd = 0.5f; //diffuse coeff const float Ks = 0.2f; //specular coeff const float n = 20.f; //specular power const float Ax = 1.f; //ambient color, can be RGB const float Dx = 1.f; //diffuse color, can be RGB const float Sx = 1.f; //specular color, can be RGB const float Lx = 1.f; //light color //Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n] float3 L = normalized(light_pose - P); float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P); float3 R = normalized(2 * N * dot(N, L) - L); float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n); color = make_float3(Ix, Ix, Ix); } uchar4 out; out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f); out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f); out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f); out.w = 0; dst(y, x) = out; } } } void kfusion::device::renderImage(const Depth& depth, const Normals& 
normals, const Reprojector& reproj, const float3& light_pose, Image& image) { dim3 block (32, 8); dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y)); hipLaunchKernelGGL(( render_image_kernel), dim3(grid), dim3(block), 0, 0, (PtrStep<ushort>)depth, normals, reproj, light_pose, image); cudaSafeCall ( hipGetLastError () ); } void kfusion::device::renderImage(const Points& points, const Normals& normals, const Reprojector& reproj, const Vec3f& light_pose, Image& image) { dim3 block (32, 8); dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y)); hipLaunchKernelGGL(( render_image_kernel), dim3(grid), dim3(block), 0, 0, (PtrStep<Point>)points, normals, reproj, light_pose, image); cudaSafeCall ( hipGetLastError () ); } namespace kfusion { namespace device { __global__ void tangent_colors_kernel(PtrStepSz<Normal> normals, PtrStep<uchar4> colors) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= normals.cols || y >= normals.rows) return; float4 n = normals(y, x); #if 0 unsigned char r = static_cast<unsigned char>(__saturatef((-n.x + 1.f)/2.f) * 255.f); unsigned char g = static_cast<unsigned char>(__saturatef((-n.y + 1.f)/2.f) * 255.f); unsigned char b = static_cast<unsigned char>(__saturatef((-n.z + 1.f)/2.f) * 255.f); #else unsigned char r = static_cast<unsigned char>((5.f - n.x * 3.5f) * 25.5f); unsigned char g = static_cast<unsigned char>((5.f - n.y * 2.5f) * 25.5f); unsigned char b = static_cast<unsigned char>((5.f - n.z * 3.5f) * 25.5f); #endif colors(y, x) = make_uchar4(b, g, r, 0); } } } void kfusion::device::renderTangentColors(const Normals& normals, Image& image) { dim3 block (32, 8); dim3 grid (divUp (normals.cols(), block.x), divUp (normals.rows(), block.y)); hipLaunchKernelGGL(( tangent_colors_kernel), dim3(grid), dim3(block), 0, 0, normals, image); cudaSafeCall ( hipGetLastError () ); } namespace kfusion { namespace device { __global__ void mergePointNormalKernel (const Point* cloud, const float8* normals, PtrSz<float12> output) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < output.size) { float4 p = cloud[idx]; float8 n = normals[idx]; float12 o; o.x = p.x; o.y = p.y; o.z = p.z; o.normal_x = n.x; o.normal_y = n.y; o.normal_z = n.z; output.data[idx] = o; } } } } void kfusion::device::mergePointNormal (const DeviceArray<Point>& cloud, const DeviceArray<float8>& normals, const DeviceArray<float12>& output) { const int block = 256; int total = (int)output.size (); hipLaunchKernelGGL(( mergePointNormalKernel), dim3(divUp (total, block)), dim3(block), 0, 0, cloud, normals, output); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); }
76c8f4322d65f48f4c85d7ba6c67e08797950f89.cu
#include "device.hpp" //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Depth bilateral filter namespace kfusion { namespace device { __global__ void bilateral_kernel(const PtrStepSz<ushort> src, PtrStep<ushort> dst, const int ksz, const float sigma_spatial2_inv_half, const float sigma_depth2_inv_half) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= src.cols || y >= src.rows) return; int value = src(y, x); int tx = min (x - ksz / 2 + ksz, src.cols - 1); int ty = min (y - ksz / 2 + ksz, src.rows - 1); float sum1 = 0; float sum2 = 0; for (int cy = max (y - ksz / 2, 0); cy < ty; ++cy) { for (int cx = max (x - ksz / 2, 0); cx < tx; ++cx) { int depth = src(cy, cx); float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy); float color2 = (value - depth) * (value - depth); float weight = __expf (-(space2 * sigma_spatial2_inv_half + color2 * sigma_depth2_inv_half)); sum1 += depth * weight; sum2 += weight; } } dst(y, x) = __float2int_rn (sum1 / sum2); } } } void kfusion::device::bilateralFilter (const Depth& src, Depth& dst, int kernel_size, float sigma_spatial, float sigma_depth) { sigma_depth *= 1000; // meters -> mm dim3 block (32, 8); dim3 grid (divUp (src.cols (), block.x), divUp (src.rows (), block.y)); cudaSafeCall( cudaFuncSetCacheConfig (bilateral_kernel, cudaFuncCachePreferL1) ); bilateral_kernel<<<grid, block>>>(src, dst, kernel_size, 0.5f / (sigma_spatial * sigma_spatial), 0.5f / (sigma_depth * sigma_depth)); cudaSafeCall ( cudaGetLastError () ); }; //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Depth truncation namespace kfusion { namespace device { __global__ void truncate_depth_kernel(PtrStepSz<ushort> depth, ushort max_dist /*mm*/) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < depth.cols && y < depth.rows) if(depth(y, x) > max_dist) depth(y, x) = 0; } } } void kfusion::device::truncateDepth(Depth& depth, float max_dist /*meters*/) { dim3 block (32, 8); dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y)); truncate_depth_kernel<<<grid, block>>>(depth, static_cast<ushort>(max_dist * 1000.f)); cudaSafeCall ( cudaGetLastError() ); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Build depth pyramid namespace kfusion { namespace device { __global__ void pyramid_kernel(const PtrStepSz<ushort> src, PtrStepSz<ushort> dst, float sigma_depth_mult3) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= dst.cols || y >= dst.rows) return; const int D = 5; int center = src(2 * y, 2 * x); int tx = min (2 * x - D / 2 + D, src.cols - 1); int ty = min (2 * y - D / 2 + D, src.rows - 1); int cy = max (0, 2 * y - D / 2); int sum = 0; int count = 0; for (; cy < ty; ++cy) for (int cx = max (0, 2 * x - D / 2); cx < tx; ++cx) { int val = src(cy, cx); if (abs (val - center) < sigma_depth_mult3) { sum += val; ++count; } } dst(y, x) = (count == 0) ? 
0 : sum / count; } } } void kfusion::device::depthPyr(const Depth& source, Depth& pyramid, float sigma_depth) { sigma_depth *= 1000; // meters -> mm dim3 block (32, 8); dim3 grid (divUp(pyramid.cols(), block.x), divUp(pyramid.rows(), block.y)); pyramid_kernel<<<grid, block>>>(source, pyramid, sigma_depth * 3); cudaSafeCall ( cudaGetLastError () ); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Compute normals namespace kfusion { namespace device { __global__ void compute_normals_kernel(const PtrStepSz<ushort> depth, const Reprojector reproj, PtrStep<Normal> normals) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= depth.cols || y >= depth.rows) return; const float qnan = numeric_limits<float>::quiet_NaN (); Normal n_out = make_float4(qnan, qnan, qnan, 0.f); if (x < depth.cols - 1 && y < depth.rows - 1) { //mm -> meters float z00 = depth(y, x) * 0.001f; float z01 = depth(y, x+1) * 0.001f; float z10 = depth(y+1, x) * 0.001f; if (z00 * z01 * z10 != 0) { float3 v00 = reproj(x, y, z00); float3 v01 = reproj(x+1, y, z01); float3 v10 = reproj(x, y+1, z10); float3 n = normalized( cross (v01 - v00, v10 - v00) ); n_out = make_float4(-n.x, -n.y, -n.z, 0.f); } } normals(y, x) = n_out; } __global__ void mask_depth_kernel(const PtrStep<Normal> normals, PtrStepSz<ushort> depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < depth.cols || y < depth.rows) { float4 n = normals(y, x); if (isnan(n.x)) depth(y, x) = 0; } } } } void kfusion::device::computeNormalsAndMaskDepth(const Reprojector& reproj, Depth& depth, Normals& normals) { dim3 block (32, 8); dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y)); compute_normals_kernel<<<grid, block>>>(depth, reproj, normals); cudaSafeCall ( cudaGetLastError () ); mask_depth_kernel<<<grid, block>>>(normals, depth); cudaSafeCall ( cudaGetLastError () ); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Compute computePointNormals namespace kfusion { namespace device { __global__ void points_normals_kernel(const Reprojector reproj, const PtrStepSz<ushort> depth, PtrStep<Point> points, PtrStep<Normal> normals) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= depth.cols || y >= depth.rows) return; const float qnan = numeric_limits<float>::quiet_NaN (); points(y, x) = normals(y, x) = make_float4(qnan, qnan, qnan, qnan); if (x >= depth.cols - 1 || y >= depth.rows - 1) return; //mm -> meters float z00 = depth(y, x) * 0.001f; float z01 = depth(y, x+1) * 0.001f; float z10 = depth(y+1, x) * 0.001f; if (z00 * z01 * z10 != 0) { float3 v00 = reproj(x, y, z00); float3 v01 = reproj(x+1, y, z01); float3 v10 = reproj(x, y+1, z10); float3 n = normalized( cross (v01 - v00, v10 - v00) ); normals(y, x) = make_float4(-n.x, -n.y, -n.z, 0.f); points(y, x) = make_float4(v00.x, v00.y, v00.z, 0.f); } } } } void kfusion::device::computePointNormals(const Reprojector& reproj, const Depth& depth, Points& points, Normals& normals) { dim3 block (32, 8); dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y)); points_normals_kernel<<<grid, block>>>(reproj, depth, points, normals); cudaSafeCall ( cudaGetLastError () ); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////// /// Compute dists 
namespace kfusion { namespace device { __global__ void compute_dists_kernel(const PtrStepSz<ushort> depth, Dists dists, float2 finv, float2 c) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < depth.cols || y < depth.rows) { float xl = (x - c.x) * finv.x; float yl = (y - c.y) * finv.y; float lambda = sqrtf (xl * xl + yl * yl + 1); dists(y, x) = __float2half_rn(depth(y, x) * lambda * 0.001f); //meters } } } } void kfusion::device::compute_dists(const Depth& depth, Dists dists, float2 f, float2 c) { dim3 block (32, 8); dim3 grid (divUp (depth.cols (), block.x), divUp (depth.rows (), block.y)); compute_dists_kernel<<<grid, block>>>(depth, dists, make_float2(1.f/f.x, 1.f/f.y), c); cudaSafeCall ( cudaGetLastError () ); } namespace kfusion { namespace device { __global__ void resize_depth_normals_kernel(const PtrStep<ushort> dsrc, const PtrStep<float4> nsrc, PtrStepSz<ushort> ddst, PtrStep<float4> ndst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= ddst.cols || y >= ddst.rows) return; const float qnan = numeric_limits<float>::quiet_NaN (); ushort d = 0; float4 n = make_float4(qnan, qnan, qnan, qnan); int xs = x * 2; int ys = y * 2; int d00 = dsrc(ys+0, xs+0); int d01 = dsrc(ys+0, xs+1); int d10 = dsrc(ys+1, xs+0); int d11 = dsrc(ys+1, xs+1); if (d00 * d01 != 0 && d10 * d11 != 0) { d = (d00 + d01 + d10 + d11)/4; float4 n00 = nsrc(ys+0, xs+0); float4 n01 = nsrc(ys+0, xs+1); float4 n10 = nsrc(ys+1, xs+0); float4 n11 = nsrc(ys+1, xs+1); n.x = (n00.x + n01.x + n10.x + n11.x)*0.25; n.y = (n00.y + n01.y + n10.y + n11.y)*0.25; n.z = (n00.z + n01.z + n10.z + n11.z)*0.25; } ddst(y, x) = d; ndst(y, x) = n; } } } void kfusion::device::resizeDepthNormals(const Depth& depth, const Normals& normals, Depth& depth_out, Normals& normals_out) { int in_cols = depth.cols (); int in_rows = depth.rows (); int out_cols = in_cols / 2; int out_rows = in_rows / 2; dim3 block (32, 8); dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y)); resize_depth_normals_kernel<<<grid, block>>>(depth, normals, depth_out, normals_out); cudaSafeCall ( cudaGetLastError () ); } namespace kfusion { namespace device { __global__ void resize_points_normals_kernel(const PtrStep<Point> vsrc, const PtrStep<Normal> nsrc, PtrStepSz<Point> vdst, PtrStep<Normal> ndst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= vdst.cols || y >= vdst.rows) return; const float qnan = numeric_limits<float>::quiet_NaN (); vdst(y, x) = ndst(y, x) = make_float4(qnan, qnan, qnan, 0.f); int xs = x * 2; int ys = y * 2; float3 d00 = tr(vsrc(ys+0, xs+0)); float3 d01 = tr(vsrc(ys+0, xs+1)); float3 d10 = tr(vsrc(ys+1, xs+0)); float3 d11 = tr(vsrc(ys+1, xs+1)); if (!isnan(d00.x * d01.x * d10.x * d11.x)) { float3 d = (d00 + d01 + d10 + d11) * 0.25f; vdst(y, x) = make_float4(d.x, d.y, d.z, 0.f); float3 n00 = tr(nsrc(ys+0, xs+0)); float3 n01 = tr(nsrc(ys+0, xs+1)); float3 n10 = tr(nsrc(ys+1, xs+0)); float3 n11 = tr(nsrc(ys+1, xs+1)); float3 n = (n00 + n01 + n10 + n11)*0.25f; ndst(y, x) = make_float4(n.x, n.y, n.z, 0.f); } } } } void kfusion::device::resizePointsNormals(const Points& points, const Normals& normals, Points& points_out, Normals& normals_out) { int out_cols = points.cols () / 2; int out_rows = points.rows () / 2; dim3 block (32, 8); dim3 grid (divUp (out_cols, block.x), divUp (out_rows, block.y)); resize_points_normals_kernel<<<grid, block>>>(points, normals, points_out, normals_out); 
cudaSafeCall ( cudaGetLastError () ); } namespace kfusion { namespace device { __global__ void render_image_kernel(const PtrStep<ushort> depth, const PtrStep<Normal> normals, const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= dst.cols || y >= dst.rows) return; float3 color; int d = depth(y,x); if (d == 0) { const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f); const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f); float w = static_cast<float>(y) / dst.rows; color = bgr1 * (1 - w) + bgr2 * w; } else { float3 P = reproj(x, y, d * 0.001f); float3 N = tr(normals(y,x)); const float Ka = 0.3f; //ambient coeff const float Kd = 0.5f; //diffuse coeff const float Ks = 0.2f; //specular coeff const float n = 20.f; //specular power const float Ax = 1.f; //ambient color, can be RGB const float Dx = 1.f; //diffuse color, can be RGB const float Sx = 1.f; //specular color, can be RGB const float Lx = 1.f; //light color //Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n] float3 L = normalized(light_pose - P); float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P); float3 R = normalized(2 * N * dot(N, L) - L); float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n); color = make_float3(Ix, Ix, Ix); } uchar4 out; out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f); out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f); out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f); out.w = 0; dst(y, x) = out; } __global__ void render_image_kernel(const PtrStep<Point> points, const PtrStep<Normal> normals, const Reprojector reproj, const float3 light_pose, PtrStepSz<uchar4> dst) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= dst.cols || y >= dst.rows) return; float3 color; float3 p = tr(points(y,x)); if (isnan(p.x)) { const float3 bgr1 = make_float3(4.f/255.f, 2.f/255.f, 2.f/255.f); const float3 bgr2 = make_float3(236.f/255.f, 120.f/255.f, 120.f/255.f); float w = static_cast<float>(y) / dst.rows; color = bgr1 * (1 - w) + bgr2 * w; } else { float3 P = p; float3 N = tr(normals(y,x)); const float Ka = 0.3f; //ambient coeff const float Kd = 0.5f; //diffuse coeff const float Ks = 0.2f; //specular coeff const float n = 20.f; //specular power const float Ax = 1.f; //ambient color, can be RGB const float Dx = 1.f; //diffuse color, can be RGB const float Sx = 1.f; //specular color, can be RGB const float Lx = 1.f; //light color //Ix = Ax*Ka*Dx + Att*Lx [Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n] float3 L = normalized(light_pose - P); float3 V = normalized(make_float3(0.f, 0.f, 0.f) - P); float3 R = normalized(2 * N * dot(N, L) - L); float Ix = Ax*Ka*Dx + Lx * Kd * Dx * fmax(0.f, dot(N, L)) + Lx * Ks * Sx * __powf(fmax(0.f, dot(R, V)), n); color = make_float3(Ix, Ix, Ix); } uchar4 out; out.x = static_cast<unsigned char>(__saturatef(color.x) * 255.f); out.y = static_cast<unsigned char>(__saturatef(color.y) * 255.f); out.z = static_cast<unsigned char>(__saturatef(color.z) * 255.f); out.w = 0; dst(y, x) = out; } } } void kfusion::device::renderImage(const Depth& depth, const Normals& normals, const Reprojector& reproj, const float3& light_pose, Image& image) { dim3 block (32, 8); dim3 grid (divUp (depth.cols(), block.x), divUp (depth.rows(), block.y)); render_image_kernel<<<grid, block>>>((PtrStep<ushort>)depth, normals, reproj, 
light_pose, image); cudaSafeCall ( cudaGetLastError () ); } void kfusion::device::renderImage(const Points& points, const Normals& normals, const Reprojector& reproj, const Vec3f& light_pose, Image& image) { dim3 block (32, 8); dim3 grid (divUp (points.cols(), block.x), divUp (points.rows(), block.y)); render_image_kernel<<<grid, block>>>((PtrStep<Point>)points, normals, reproj, light_pose, image); cudaSafeCall ( cudaGetLastError () ); } namespace kfusion { namespace device { __global__ void tangent_colors_kernel(PtrStepSz<Normal> normals, PtrStep<uchar4> colors) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= normals.cols || y >= normals.rows) return; float4 n = normals(y, x); #if 0 unsigned char r = static_cast<unsigned char>(__saturatef((-n.x + 1.f)/2.f) * 255.f); unsigned char g = static_cast<unsigned char>(__saturatef((-n.y + 1.f)/2.f) * 255.f); unsigned char b = static_cast<unsigned char>(__saturatef((-n.z + 1.f)/2.f) * 255.f); #else unsigned char r = static_cast<unsigned char>((5.f - n.x * 3.5f) * 25.5f); unsigned char g = static_cast<unsigned char>((5.f - n.y * 2.5f) * 25.5f); unsigned char b = static_cast<unsigned char>((5.f - n.z * 3.5f) * 25.5f); #endif colors(y, x) = make_uchar4(b, g, r, 0); } } } void kfusion::device::renderTangentColors(const Normals& normals, Image& image) { dim3 block (32, 8); dim3 grid (divUp (normals.cols(), block.x), divUp (normals.rows(), block.y)); tangent_colors_kernel<<<grid, block>>>(normals, image); cudaSafeCall ( cudaGetLastError () ); } namespace kfusion { namespace device { __global__ void mergePointNormalKernel (const Point* cloud, const float8* normals, PtrSz<float12> output) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < output.size) { float4 p = cloud[idx]; float8 n = normals[idx]; float12 o; o.x = p.x; o.y = p.y; o.z = p.z; o.normal_x = n.x; o.normal_y = n.y; o.normal_z = n.z; output.data[idx] = o; } } } } void kfusion::device::mergePointNormal (const DeviceArray<Point>& cloud, const DeviceArray<float8>& normals, const DeviceArray<float12>& output) { const int block = 256; int total = (int)output.size (); mergePointNormalKernel<<<divUp (total, block), block>>>(cloud, normals, output); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); }
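Both render_image_kernel overloads above evaluate the same ambient + diffuse + specular (Phong-style) term per pixel. The following standalone sketch restates that arithmetic with plain structs so the formula can be read in isolation; vec3, shade_phong and the small helpers are illustrative names, not part of kfusion, and the material constants simply mirror the ones hard-coded in the kernels.

// Minimal sketch of the shading term used in render_image_kernel (assumed helper names).
#include <math.h>

struct vec3 { float x, y, z; };

__host__ __device__ inline vec3  make3(float x, float y, float z) { vec3 v = {x, y, z}; return v; }
__host__ __device__ inline vec3  sub3(vec3 a, vec3 b)  { return make3(a.x - b.x, a.y - b.y, a.z - b.z); }
__host__ __device__ inline vec3  scale3(vec3 a, float s) { return make3(a.x * s, a.y * s, a.z * s); }
__host__ __device__ inline float dot3(vec3 a, vec3 b)  { return a.x * b.x + a.y * b.y + a.z * b.z; }
__host__ __device__ inline vec3  norm3(vec3 a)         { return scale3(a, 1.f / sqrtf(dot3(a, a))); }

// P: surface point, N: unit surface normal, light: light position; the camera sits at the origin.
__host__ __device__ inline float shade_phong(vec3 P, vec3 N, vec3 light)
{
    const float Ka = 0.3f, Kd = 0.5f, Ks = 0.2f, n = 20.f;  // ambient/diffuse/specular coeffs, specular power
    const float Ax = 1.f, Dx = 1.f, Sx = 1.f, Lx = 1.f;     // ambient/diffuse/specular/light colours

    vec3 L = norm3(sub3(light, P));                          // direction to the light
    vec3 V = norm3(sub3(make3(0.f, 0.f, 0.f), P));           // direction to the viewer (origin)
    vec3 R = norm3(sub3(scale3(N, 2.f * dot3(N, L)), L));    // reflected light direction

    // Ix = Ax*Ka*Dx + Lx*[Kd*Dx*(N dot L) + Ks*Sx*(R dot V)^n], with the lobes clamped at zero
    return Ax * Ka * Dx
         + Lx * Kd * Dx * fmaxf(0.f, dot3(N, L))
         + Lx * Ks * Sx * powf(fmaxf(0.f, dot3(R, V)), n);
}

The kernels differ only in using the device intrinsics __powf and __saturatef, and in writing the clamped intensity out as an 8-bit BGRA value.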
3644e3ecf3e9e383a7160d798553a96a5f4537dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <algorithm> #include <imageMatrix.h> /* ******* * * KERNELS * * ******* */ /**** MATMUL ****/ __global__ void matMulKernel(float* A, float* B, float* C, int m, int k, int n) { int COL = blockIdx.x*blockDim.x+threadIdx.x; int ROW = blockIdx.y*blockDim.y+threadIdx.y; if (ROW<m && COL<n) { float tmpSum = 0.0f; for (int i = 0; i < k; ++i) { tmpSum += A[(ROW*k)+i] * B[(i*n)+COL]; } C[(ROW*n)+COL] = tmpSum; } return ; } /**** SQUARE MATMUL ****/ __global__ void squareMatMulKernel(float* A, float* B, float* C, int N) { int COL = blockIdx.x*blockDim.x+threadIdx.x; int ROW = blockIdx.y*blockDim.y+threadIdx.y; if (ROW<N && COL<N) { float tmpSum=0.0f; for (int i = 0; i < N; ++i) { tmpSum += A[(ROW*N)+i] * B[(i*N)+COL]; } C[(ROW*N)+COL] = tmpSum; } return ; } /***** SHARED MATMUL *****/ #define TILE_WIDTH 16 __global__ void sharedMatMulKernel(float *A, float *B, float *C, int size) { //const int blockSize = blockDim.x; // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = size * TILE_WIDTH * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + size - 1; // Step size used to iterate through the sub-matrices of A int aStep = TILE_WIDTH; // Index of the first sub-matrix of B processed by the block int bBegin = TILE_WIDTH * bx; // Step size used to iterate through the sub-matrices of B int bStep = TILE_WIDTH * size; // The element of the block sub-matrix that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Shared memory for the sub-matrices of A and B __shared__ float As[TILE_WIDTH][TILE_WIDTH]; __shared__ float Bs[TILE_WIDTH][TILE_WIDTH]; // Load the matrices from global memory to shared memory, each thread loads one element of each matrix As[ty][tx] = A[a + size * ty + tx]; Bs[ty][tx] = B[b + size * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); #pragma unroll // Multiply the two matrices together, each thread computes one element of the block sub-matrix for (int k = 0; k < TILE_WIDTH; ++k) Csub += As[ty][k] * Bs[k][tx]; // Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to global memory, each thread writes one element int c = size * TILE_WIDTH * by + TILE_WIDTH * bx; C[c + size * ty + tx] = Csub; } /**** BLURBOX ****/ __global__ void blurBoxFilterKer(unsigned char* input_image, unsigned char* output_image, int width, int height) { const unsigned int offset = blockIdx.x*blockDim.x+threadIdx.x; int dim = width*height*3; if(offset<dim){ int x = offset % width; int y = (offset-x)/width; int fsize = 5; // Filter size if(offset < width*height) { float output_red = 0; float output_green = 0; float output_blue = 0; int hits = 0; for(int ox = -fsize; ox < fsize+1; ++ox) { for(int oy = -fsize; oy < fsize+1; ++oy) { if((x+ox) > -1 && (x+ox) < width && (y+oy) > -1 && (y+oy) < height) { const int currentoffset = (offset+ox+oy*width)*3; output_red += input_image[currentoffset]; output_green += input_image[currentoffset+1]; output_blue += input_image[currentoffset+2]; hits++; } } } output_image[offset*3] = output_red/hits; 
output_image[offset*3+1] = output_green/hits; output_image[offset*3+2] = output_blue/hits; } } return; } /**** GRID-STRIDE MATMUL ****/ __global__ void matMulGridStride(float* A, float* B, float* C, int m, int k, int n) { int ROW = blockIdx.x*blockDim.x+threadIdx.x; int COL = blockIdx.y*blockDim.y+threadIdx.y; int Rstride = blockDim.x*gridDim.x; int Cstride = blockDim.y*gridDim.y; for (int r=ROW; r<m; r+=Rstride) { for (int c=COL; c<n; c+=Cstride) { float tmpSum = 0.0f; for (int i = 0; i <k; ++i) { tmpSum += A[(r*k)+i] * B[(i*n)+c]; } C[(r*n)+c] = tmpSum; } } return ; } /**** GRID-STRIDE SQUARE MATMUL ****/ __global__ void squareMatMulGridStrideKer(float* A, float* B, float* C, int N) { int ROW = blockIdx.x*blockDim.x+threadIdx.x; int COL = blockIdx.y*blockDim.y+threadIdx.y; int Rstride = blockDim.x*gridDim.x; int Cstride = blockDim.y*gridDim.y; float tmpSum = 0.0f; for (int k=ROW; k<N; k+=Rstride) { for (int j=COL; j<N; j+=Cstride) { tmpSum=0; for (int i = 0; i < N; i++) { tmpSum += A[(k*N)+i] * B[(i*N)+j]; } C[(k*N)+j] = tmpSum; } } return ; } /**** GRID-STRIDE BLUR BOX ****/ __global__ void blurBoxGridStride(unsigned char* input_image, unsigned char* output_image, int width, int height) { const unsigned int offset = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int stride = gridDim.x * blockDim.x; int fsize = 5; // Filter size for(int i=offset; i<width*height; i+=stride) { int x = offset % width; int y = (offset-x)/width; float output_red = 0; float output_green = 0; float output_blue = 0; int hits = 0; for(int ox = -fsize; ox < fsize+1; ++ox) { for(int oy = -fsize; oy < fsize+1; ++oy) { if((x+ox) > -1 && (x+ox) < width && (y+oy) > -1 && (y+oy) < height) { const int currentoffset = ( i +ox+oy*width)*3; output_red += input_image[currentoffset]; output_green += input_image[currentoffset+1]; output_blue += input_image[currentoffset+2]; hits++; } } } output_image[i *3] = output_red/hits; output_image[i *3+1] = output_green/hits; output_image[i *3+2] = output_blue/hits; } return ; } /******************* **KERNEL LAUNCHERS** ********************/ #ifdef MATMUL /**** SQUARE MATMUL ****/ void streamSquareMatMul(float *A, float *B, float *C, float *Ad, float *Bd, float *Cd, int n, hipStream_t strm, bool shared) { unsigned int bytesMat = n*n*sizeof(float); // H2D memCopy gpuErrchk( hipMemcpyAsync(Ad, A, bytesMat, hipMemcpyHostToDevice, strm) ); gpuErrchk( hipMemcpyAsync(Bd, B, bytesMat, hipMemcpyHostToDevice, strm) ); // Grid and Block setting dim3 dimBlock( BLOCK,BLOCK,1 ); dim3 dimGrid( GRIDx,GRIDx,1 ); // Kernel launch if(shared) hipLaunchKernelGGL(( sharedMatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, strm, Ad, Bd, Cd, n); else hipLaunchKernelGGL(( squareMatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, strm, Ad, Bd, Cd, n); // D2H memCopy gpuErrchk( hipMemcpyAsync( C, Cd, bytesMat, hipMemcpyDeviceToHost, strm) ); #ifndef MEASURES gpuErrchk( hipPeekAtLastError() ); #endif } void squareMatMul(float *A, float *B, float *C, float *Ad, float *Bd, float *Cd, int n, bool shared) { unsigned int bytesMat = n*n*sizeof(float); // H2D memCopy gpuErrchk( hipMemcpy(Ad, A, bytesMat, hipMemcpyHostToDevice) ); gpuErrchk( hipMemcpy(Bd, B, bytesMat, hipMemcpyHostToDevice) ); // Grid and Block setting dim3 dimBlock( BLOCK,BLOCK,1 ); dim3 dimGrid( GRIDx,GRIDx,1 ); // Kernel launch if(shared) hipLaunchKernelGGL(( sharedMatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd, Cd, n); else hipLaunchKernelGGL(( squareMatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd, Cd, n); // D2H memCopy 
gpuErrchk( hipMemcpy( C, Cd, bytesMat, hipMemcpyDeviceToHost) ); #ifndef MEASURES gpuErrchk( hipPeekAtLastError() ); #endif } /**** NON SQUARE MATMUL ****/ void streamMatMul(float *A, float *B, float *C, float *Ad, float *Bd, float *Cd, int m, int k, int n, hipStream_t strm) { unsigned int bytesA = m*k*sizeof(float); unsigned int bytesB = k*n*sizeof(float); unsigned int bytesC = m*n*sizeof(float); // H2D memCopy hipMemcpyAsync(Ad, A, bytesA, hipMemcpyHostToDevice, strm); hipMemcpyAsync(Bd, B, bytesB, hipMemcpyHostToDevice, strm); // Grid and Block setting dim3 dimBlock( BLOCK,BLOCK,1 ); dim3 dimGrid( GRIDx,GRIDy,1 ); // Kernel launch hipLaunchKernelGGL(( matMulKernel), dim3(dimGrid), dim3(dimBlock), 0, strm, Ad, Bd, Cd, m, k, n); // D2H memCopy hipMemcpyAsync( C, Cd, bytesC, hipMemcpyDeviceToHost, strm); } void matMul(float *A, float *B, float *C, float *Ad, float *Bd, float *Cd, int m, int k, int n) { unsigned int bytesA = m*k*sizeof(float); unsigned int bytesB = k*n*sizeof(float); unsigned int bytesC = m*n*sizeof(float); // H2D memCopy hipMemcpy(Ad, A, bytesA, hipMemcpyHostToDevice); hipMemcpy(Bd, B, bytesB, hipMemcpyHostToDevice); // Grid and Block setting dim3 dimBlock( BLOCK,BLOCK,1 ); dim3 dimGrid( GRIDx,GRIDy,1 ); // Kernel launch hipLaunchKernelGGL(( matMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd, Cd, m, k, n); // D2H memCopy hipMemcpy( C, Cd, bytesC, hipMemcpyDeviceToHost); } #endif /**** BLURBOX ****/ #ifdef BLURBOX void streamBlurBoxFilter (unsigned char *in_h, unsigned char *out_h, unsigned char *in_d, unsigned char *out_d, int width, int height, hipStream_t strm) { unsigned int size = width*height*3; // H2D memCopy gpuErrchk( hipMemcpyAsync(in_d, in_h, size, hipMemcpyHostToDevice, strm) ); // Grid and Block setting #ifdef LOWPAR GRIDx = 1; #else GRIDx = (unsigned int)((size+BLOCK-1)/BLOCK); #endif // Kernel launch #ifdef LOWPAR hipLaunchKernelGGL(( blurBoxGridStride), dim3(GRIDx), dim3(BLOCK), 0, strm, in_d, out_d, width, height); #else hipLaunchKernelGGL(( blurBoxFilterKer), dim3(GRIDx), dim3(BLOCK), 0, strm, in_d, out_d, width, height); #endif // D2H memCopy gpuErrchk( hipMemcpyAsync( out_h, out_d, size, hipMemcpyDeviceToHost, strm) ); #ifndef MEASURES gpuErrchk( hipPeekAtLastError() ); #endif } void blurBoxFilter (unsigned char *in_h, unsigned char *out_h, unsigned char *in_d, unsigned char *out_d, int width, int height) { unsigned int size = width*height*3; // H2D memCopy gpuErrchk( hipMemcpy(in_d,in_h, size, hipMemcpyHostToDevice) ); // Grid and Block setting #ifdef LOWPAR GRIDx = 1; #else GRIDx = (unsigned int)((size+BLOCK-1)/BLOCK); #endif dim3 blockDims( BLOCK,1,1 ); dim3 gridDims( GRIDx,1,1 ); // Kernel launch #ifdef LOWPAR hipLaunchKernelGGL(( blurBoxGridStride), dim3(gridDims), dim3(blockDims), 0, 0, in_d, out_d, width, height); #else hipLaunchKernelGGL(( blurBoxFilterKer), dim3(gridDims), dim3(blockDims), 0, 0, in_d, out_d, width, height); #endif // D2H memCopy gpuErrchk( hipMemcpy( out_h, out_d, size, hipMemcpyDeviceToHost) ); } #endif
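The launchers above only move data and run the kernels; they do not check results. A host-side reference check one might pair with squareMatMulKernel / sharedMatMulKernel is sketched below (matmul_matches is a hypothetical helper, not part of this file); it recomputes C = A*B on the CPU with the same row-major indexing the kernels use and compares within a relative tolerance.

// Hypothetical host-side correctness check for an n x n matrix product.
#include <cmath>
#include <cstdio>
#include <vector>

static bool matmul_matches(const std::vector<float>& A, const std::vector<float>& B,
                           const std::vector<float>& C_gpu, int n, float tol = 1e-3f)
{
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < n; ++col) {
            float ref = 0.f;
            for (int k = 0; k < n; ++k)
                ref += A[row * n + k] * B[k * n + col];   // same row-major layout as the kernels
            float err = std::fabs(ref - C_gpu[row * n + col]);
            if (err > tol * std::fmax(1.f, std::fabs(ref))) {
                std::printf("mismatch at (%d,%d): ref=%f gpu=%f\n", row, col, ref, C_gpu[row * n + col]);
                return false;
            }
        }
    }
    return true;
}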
3644e3ecf3e9e383a7160d798553a96a5f4537dd.cu
#include <math.h> #include <algorithm> #include <imageMatrix.h> /* ******* * * KERNELS * * ******* */ /**** MATMUL ****/ __global__ void matMulKernel(float* A, float* B, float* C, int m, int k, int n) { int COL = blockIdx.x*blockDim.x+threadIdx.x; int ROW = blockIdx.y*blockDim.y+threadIdx.y; if (ROW<m && COL<n) { float tmpSum = 0.0f; for (int i = 0; i < k; ++i) { tmpSum += A[(ROW*k)+i] * B[(i*n)+COL]; } C[(ROW*n)+COL] = tmpSum; } return ; } /**** SQUARE MATMUL ****/ __global__ void squareMatMulKernel(float* A, float* B, float* C, int N) { int COL = blockIdx.x*blockDim.x+threadIdx.x; int ROW = blockIdx.y*blockDim.y+threadIdx.y; if (ROW<N && COL<N) { float tmpSum=0.0f; for (int i = 0; i < N; ++i) { tmpSum += A[(ROW*N)+i] * B[(i*N)+COL]; } C[(ROW*N)+COL] = tmpSum; } return ; } /***** SHARED MATMUL *****/ #define TILE_WIDTH 16 __global__ void sharedMatMulKernel(float *A, float *B, float *C, int size) { //const int blockSize = blockDim.x; // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = size * TILE_WIDTH * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + size - 1; // Step size used to iterate through the sub-matrices of A int aStep = TILE_WIDTH; // Index of the first sub-matrix of B processed by the block int bBegin = TILE_WIDTH * bx; // Step size used to iterate through the sub-matrices of B int bStep = TILE_WIDTH * size; // The element of the block sub-matrix that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Shared memory for the sub-matrices of A and B __shared__ float As[TILE_WIDTH][TILE_WIDTH]; __shared__ float Bs[TILE_WIDTH][TILE_WIDTH]; // Load the matrices from global memory to shared memory, each thread loads one element of each matrix As[ty][tx] = A[a + size * ty + tx]; Bs[ty][tx] = B[b + size * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); #pragma unroll // Multiply the two matrices together, each thread computes one element of the block sub-matrix for (int k = 0; k < TILE_WIDTH; ++k) Csub += As[ty][k] * Bs[k][tx]; // Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to global memory, each thread writes one element int c = size * TILE_WIDTH * by + TILE_WIDTH * bx; C[c + size * ty + tx] = Csub; } /**** BLURBOX ****/ __global__ void blurBoxFilterKer(unsigned char* input_image, unsigned char* output_image, int width, int height) { const unsigned int offset = blockIdx.x*blockDim.x+threadIdx.x; int dim = width*height*3; if(offset<dim){ int x = offset % width; int y = (offset-x)/width; int fsize = 5; // Filter size if(offset < width*height) { float output_red = 0; float output_green = 0; float output_blue = 0; int hits = 0; for(int ox = -fsize; ox < fsize+1; ++ox) { for(int oy = -fsize; oy < fsize+1; ++oy) { if((x+ox) > -1 && (x+ox) < width && (y+oy) > -1 && (y+oy) < height) { const int currentoffset = (offset+ox+oy*width)*3; output_red += input_image[currentoffset]; output_green += input_image[currentoffset+1]; output_blue += input_image[currentoffset+2]; hits++; } } } output_image[offset*3] = output_red/hits; output_image[offset*3+1] = output_green/hits; output_image[offset*3+2] = output_blue/hits; 
} } return; } /**** GRID-STRIDE MATMUL ****/ __global__ void matMulGridStride(float* A, float* B, float* C, int m, int k, int n) { int ROW = blockIdx.x*blockDim.x+threadIdx.x; int COL = blockIdx.y*blockDim.y+threadIdx.y; int Rstride = blockDim.x*gridDim.x; int Cstride = blockDim.y*gridDim.y; for (int r=ROW; r<m; r+=Rstride) { for (int c=COL; c<n; c+=Cstride) { float tmpSum = 0.0f; for (int i = 0; i <k; ++i) { tmpSum += A[(r*k)+i] * B[(i*n)+c]; } C[(r*n)+c] = tmpSum; } } return ; } /**** GRID-STRIDE SQUARE MATMUL ****/ __global__ void squareMatMulGridStrideKer(float* A, float* B, float* C, int N) { int ROW = blockIdx.x*blockDim.x+threadIdx.x; int COL = blockIdx.y*blockDim.y+threadIdx.y; int Rstride = blockDim.x*gridDim.x; int Cstride = blockDim.y*gridDim.y; float tmpSum = 0.0f; for (int k=ROW; k<N; k+=Rstride) { for (int j=COL; j<N; j+=Cstride) { tmpSum=0; for (int i = 0; i < N; i++) { tmpSum += A[(k*N)+i] * B[(i*N)+j]; } C[(k*N)+j] = tmpSum; } } return ; } /**** GRID-STRIDE BLUR BOX ****/ __global__ void blurBoxGridStride(unsigned char* input_image, unsigned char* output_image, int width, int height) { const unsigned int offset = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int stride = gridDim.x * blockDim.x; int fsize = 5; // Filter size for(int i=offset; i<width*height; i+=stride) { int x = offset % width; int y = (offset-x)/width; float output_red = 0; float output_green = 0; float output_blue = 0; int hits = 0; for(int ox = -fsize; ox < fsize+1; ++ox) { for(int oy = -fsize; oy < fsize+1; ++oy) { if((x+ox) > -1 && (x+ox) < width && (y+oy) > -1 && (y+oy) < height) { const int currentoffset = ( i +ox+oy*width)*3; output_red += input_image[currentoffset]; output_green += input_image[currentoffset+1]; output_blue += input_image[currentoffset+2]; hits++; } } } output_image[i *3] = output_red/hits; output_image[i *3+1] = output_green/hits; output_image[i *3+2] = output_blue/hits; } return ; } /******************* **KERNEL LAUNCHERS** ********************/ #ifdef MATMUL /**** SQUARE MATMUL ****/ void streamSquareMatMul(float *A, float *B, float *C, float *Ad, float *Bd, float *Cd, int n, cudaStream_t strm, bool shared) { unsigned int bytesMat = n*n*sizeof(float); // H2D memCopy gpuErrchk( cudaMemcpyAsync(Ad, A, bytesMat, cudaMemcpyHostToDevice, strm) ); gpuErrchk( cudaMemcpyAsync(Bd, B, bytesMat, cudaMemcpyHostToDevice, strm) ); // Grid and Block setting dim3 dimBlock( BLOCK,BLOCK,1 ); dim3 dimGrid( GRIDx,GRIDx,1 ); // Kernel launch if(shared) sharedMatMulKernel<<<dimGrid, dimBlock, 0, strm>>>(Ad, Bd, Cd, n); else squareMatMulKernel<<<dimGrid, dimBlock, 0, strm>>>(Ad, Bd, Cd, n); // D2H memCopy gpuErrchk( cudaMemcpyAsync( C, Cd, bytesMat, cudaMemcpyDeviceToHost, strm) ); #ifndef MEASURES gpuErrchk( cudaPeekAtLastError() ); #endif } void squareMatMul(float *A, float *B, float *C, float *Ad, float *Bd, float *Cd, int n, bool shared) { unsigned int bytesMat = n*n*sizeof(float); // H2D memCopy gpuErrchk( cudaMemcpy(Ad, A, bytesMat, cudaMemcpyHostToDevice) ); gpuErrchk( cudaMemcpy(Bd, B, bytesMat, cudaMemcpyHostToDevice) ); // Grid and Block setting dim3 dimBlock( BLOCK,BLOCK,1 ); dim3 dimGrid( GRIDx,GRIDx,1 ); // Kernel launch if(shared) sharedMatMulKernel<<<dimGrid, dimBlock>>>(Ad, Bd, Cd, n); else squareMatMulKernel<<<dimGrid, dimBlock>>>(Ad, Bd, Cd, n); // D2H memCopy gpuErrchk( cudaMemcpy( C, Cd, bytesMat, cudaMemcpyDeviceToHost) ); #ifndef MEASURES gpuErrchk( cudaPeekAtLastError() ); #endif } /**** NON SQUARE MATMUL ****/ void streamMatMul(float *A, float *B, float *C, float *Ad, 
float *Bd, float *Cd, int m, int k, int n, cudaStream_t strm) { unsigned int bytesA = m*k*sizeof(float); unsigned int bytesB = k*n*sizeof(float); unsigned int bytesC = m*n*sizeof(float); // H2D memCopy cudaMemcpyAsync(Ad, A, bytesA, cudaMemcpyHostToDevice, strm); cudaMemcpyAsync(Bd, B, bytesB, cudaMemcpyHostToDevice, strm); // Grid and Block setting dim3 dimBlock( BLOCK,BLOCK,1 ); dim3 dimGrid( GRIDx,GRIDy,1 ); // Kernel launch matMulKernel<<<dimGrid, dimBlock, 0, strm>>>(Ad, Bd, Cd, m, k, n); // D2H memCopy cudaMemcpyAsync( C, Cd, bytesC, cudaMemcpyDeviceToHost, strm); } void matMul(float *A, float *B, float *C, float *Ad, float *Bd, float *Cd, int m, int k, int n) { unsigned int bytesA = m*k*sizeof(float); unsigned int bytesB = k*n*sizeof(float); unsigned int bytesC = m*n*sizeof(float); // H2D memCopy cudaMemcpy(Ad, A, bytesA, cudaMemcpyHostToDevice); cudaMemcpy(Bd, B, bytesB, cudaMemcpyHostToDevice); // Grid and Block setting dim3 dimBlock( BLOCK,BLOCK,1 ); dim3 dimGrid( GRIDx,GRIDy,1 ); // Kernel launch matMulKernel<<<dimGrid, dimBlock>>>(Ad, Bd, Cd, m, k, n); // D2H memCopy cudaMemcpy( C, Cd, bytesC, cudaMemcpyDeviceToHost); } #endif /**** BLURBOX ****/ #ifdef BLURBOX void streamBlurBoxFilter (unsigned char *in_h, unsigned char *out_h, unsigned char *in_d, unsigned char *out_d, int width, int height, cudaStream_t strm) { unsigned int size = width*height*3; // H2D memCopy gpuErrchk( cudaMemcpyAsync(in_d, in_h, size, cudaMemcpyHostToDevice, strm) ); // Grid and Block setting #ifdef LOWPAR GRIDx = 1; #else GRIDx = (unsigned int)((size+BLOCK-1)/BLOCK); #endif // Kernel launch #ifdef LOWPAR blurBoxGridStride<<<GRIDx, BLOCK, 0, strm>>>(in_d, out_d, width, height); #else blurBoxFilterKer<<<GRIDx, BLOCK, 0, strm>>>(in_d, out_d, width, height); #endif // D2H memCopy gpuErrchk( cudaMemcpyAsync( out_h, out_d, size, cudaMemcpyDeviceToHost, strm) ); #ifndef MEASURES gpuErrchk( cudaPeekAtLastError() ); #endif } void blurBoxFilter (unsigned char *in_h, unsigned char *out_h, unsigned char *in_d, unsigned char *out_d, int width, int height) { unsigned int size = width*height*3; // H2D memCopy gpuErrchk( cudaMemcpy(in_d,in_h, size, cudaMemcpyHostToDevice) ); // Grid and Block setting #ifdef LOWPAR GRIDx = 1; #else GRIDx = (unsigned int)((size+BLOCK-1)/BLOCK); #endif dim3 blockDims( BLOCK,1,1 ); dim3 gridDims( GRIDx,1,1 ); // Kernel launch #ifdef LOWPAR blurBoxGridStride<<<gridDims, blockDims>>>(in_d, out_d, width, height); #else blurBoxFilterKer<<<gridDims, blockDims>>>(in_d, out_d, width, height); #endif // D2H memCopy gpuErrchk( cudaMemcpy( out_h, out_d, size, cudaMemcpyDeviceToHost) ); } #endif
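As a side note on the two launch strategies used in the blur-box paths above (a full grid sized by ceiling division versus the LOWPAR single-block launch that relies on a grid-stride loop), here is a minimal self-contained sketch of the same pattern on a trivial kernel; scale_kernel and the sizes are illustrative, not part of this file, and the hipified twin of this code would simply use the hip* equivalents.

// Sketch of "one thread per element" versus grid-stride launches (assumed example kernel).
#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale_kernel(float* data, float factor, int n)
{
    // Grid-stride loop: correct for any grid size, including a single block (LOWPAR-style launch).
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x)
        data[i] *= factor;
}

int main()
{
    const int n = 1 << 20;
    const int block = 256;
    float* d = nullptr;
    cudaMalloc(&d, n * sizeof(float));
    cudaMemset(d, 0, n * sizeof(float));

    // Full parallelism: one thread per element, grid = ceil(n / block).
    int grid = (n + block - 1) / block;
    scale_kernel<<<grid, block>>>(d, 2.f, n);

    // Low parallelism: a single block, the stride loop covers the remaining elements.
    scale_kernel<<<1, block>>>(d, 2.f, n);

    cudaDeviceSynchronize();
    printf("last error: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d);
    return 0;
}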
abdc125daa05b14f3a03d62f2ee6a9239a5ebdac.hip
// !!! This is a file automatically generated by hipify!!! /** * NASA Advanced Supercomputing Parallel Benchmarks C++ * * based on NPB 3.3.1 * * original version and technical report: * http://www.nas.nasa.gov/Software/NPB/ * * Authors: * D. Bailey * W. Saphir * * C++ version: * Dalvan Griebler <[email protected]> * Jnior Lff <[email protected]> * Gabriell Araujo <[email protected]> * * CUDA version: * Gabriell Araujo <[email protected]> */ #include <omp.h> #include <hip/hip_runtime.h> #include "../common/npb-CPP.hpp" #include "npbparams.hpp" /* * --------------------------------------------------------------------- * u0, u1, u2 are the main arrays in the problem. * depending on the decomposition, these arrays will have different * dimensions. to accomodate all possibilities, we allocate them as * one-dimensional arrays and pass them to subroutines for different * views * - u0 contains the initial (transformed) initial condition * - u1 and u2 are working arrays * - twiddle contains exponents for the time evolution operator. * --------------------------------------------------------------------- * large arrays are in common so that they are allocated on the * heap rather than the stack. this common block is not * referenced directly anywhere else. padding is to avoid accidental * cache problems, since all array sizes are powers of two. * --------------------------------------------------------------------- * we need a bunch of logic to keep track of how * arrays are laid out. * * note: this serial version is the derived from the parallel 0D case * of the ft NPB. * the computation proceeds logically as * * set up initial conditions * fftx(1) * transpose (1->2) * ffty(2) * transpose (2->3) * fftz(3) * time evolution * fftz(3) * transpose (3->2) * ffty(2) * transpose (2->1) * fftx(1) * compute residual(1) * * for the 0D, 1D, 2D strategies, the layouts look like xxx * * 0D 1D 2D * 1: xyz xyz xyz * 2: xyz xyz yxz * 3: xyz zyx zxy * the array dimensions are stored in dims(coord, phase) * --------------------------------------------------------------------- * if processor array is 1x1 -> 0D grid decomposition * * cache blocking params. these values are good for most * RISC processors. * FFT parameters: * fftblock controls how many ffts are done at a time. * the default is appropriate for most cache-based machines * on vector machines, the FFT can be vectorized with vector * length equal to the block size, so the block size should * be as large as possible. this is the size of the smallest * dimension of the problem: 128 for class A, 256 for class B * and 512 for class C. 
* --------------------------------------------------------------------- */ #define FFTBLOCK_DEFAULT (DEFAULT_BEHAVIOR) #define FFTBLOCKPAD_DEFAULT (DEFAULT_BEHAVIOR) #define FFTBLOCK (FFTBLOCK_DEFAULT) #define FFTBLOCKPAD (FFTBLOCKPAD_DEFAULT) #define SEED (314159265.0) #define A (1220703125.0) #define PI (3.141592653589793238) #define ALPHA (1.0e-6) #define AP (-4.0*ALPHA*PI*PI) #define T_TOTAL (1) #define T_SETUP (2) #define T_FFT (3) #define T_EVOLVE (4) #define T_CHECKSUM (5) #define T_FFTX (6) #define T_FFTY (7) #define T_FFTZ (8) #define T_MAX (8) #define CHECKSUM_TASKS (1024) #define THREADS_PER_BLOCK_AT_CHECKSUM (128) #define DEFAULT_GPU (0) #define OMP_THREADS (3) #define COMPUTE_INDEXMAP (0) #define COMPUTE_INITIAL_CONDITIONS (1) #define COMPUTE_FFT_INIT (2) /* global variables */ #if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION) static dcomplex sums[NITER_DEFAULT+1]; static double twiddle[NTOTAL]; static dcomplex u[MAXDIM]; static dcomplex u0[NTOTAL]; static dcomplex u1[NTOTAL]; static int dims[3]; #else static dcomplex (*sums)=(dcomplex*)malloc(sizeof(dcomplex)*(NITER_DEFAULT+1)); static double (*twiddle)=(double*)malloc(sizeof(double)*(NTOTAL)); static dcomplex (*u)=(dcomplex*)malloc(sizeof(dcomplex)*(MAXDIM)); static dcomplex (*u0)=(dcomplex*)malloc(sizeof(dcomplex)*(NTOTAL)); static dcomplex (*u1)=(dcomplex*)malloc(sizeof(dcomplex)*(NTOTAL)); static int (*dims)=(int*)malloc(sizeof(int)*(3)); #endif static int niter; static boolean timers_enabled; /* gpu variables */ int THREADS_PER_BLOCK_AT_COMPUTE_INDEXMAP; int THREADS_PER_BLOCK_AT_COMPUTE_INITIAL_CONDITIONS; int THREADS_PER_BLOCK_AT_INIT_UI; int THREADS_PER_BLOCK_AT_EVOLVE; int THREADS_PER_BLOCK_AT_FFT1; int THREADS_PER_BLOCK_AT_FFT2; int THREADS_PER_BLOCK_AT_FFT3; dcomplex* sums_device; double* starts_device; double* twiddle_device; dcomplex* u_device; dcomplex* u0_device; dcomplex* u1_device; dcomplex* u2_device; dcomplex* y0_device; dcomplex* y1_device; size_t size_sums_device; size_t size_starts_device; size_t size_twiddle_device; size_t size_u_device; size_t size_u0_device; size_t size_u1_device; size_t size_y0_device; size_t size_y1_device; /* function declarations */ static void cffts1_gpu(const int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]); __global__ void cffts1_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]); __global__ void cffts1_gpu_kernel_2(const int is, dcomplex y0[], dcomplex y1[], dcomplex u_device[]); __global__ void cffts1_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]); static void cffts2_gpu(int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]); __global__ void cffts2_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]); __global__ void cffts2_gpu_kernel_2(const int is, dcomplex y0[], dcomplex y1[], dcomplex u_device[]); __global__ void cffts2_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]); static void cffts3_gpu(int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]); __device__ void cffts3_gpu_cfftz_device(const int is, int m, int n, dcomplex x[], dcomplex y[], dcomplex u_device[], int index_arg, int size_arg); __device__ void cffts3_gpu_fftz2_device(const int is, int l, int m, int n, dcomplex u[], dcomplex x[], dcomplex y[], int index_arg, int size_arg); __global__ void cffts3_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]); __global__ void cffts3_gpu_kernel_2(const int is, dcomplex y0[], dcomplex y1[], dcomplex u_device[]); __global__ void cffts3_gpu_kernel_3(dcomplex x_out[], 
dcomplex y0[]); static void checksum_gpu(int iteration, dcomplex u1[]); __global__ void checksum_gpu_kernel(int iteration, dcomplex u1[], dcomplex sums[]); static void compute_indexmap_gpu(double twiddle[]); __global__ void compute_indexmap_gpu_kernel(double twiddle[]); static void compute_initial_conditions_gpu(dcomplex u0[]); __global__ void compute_initial_conditions_gpu_kernel(dcomplex u0[], double starts[]); static void evolve_gpu(dcomplex u0[], dcomplex u1[], double twiddle[]); __global__ void evolve_gpu_kernel(dcomplex u0[], dcomplex u1[], double twiddle[]); static void fft_gpu(int dir, dcomplex x1[], dcomplex x2[]); static void fft_init_gpu(int n); static int ilog2(int n); __device__ int ilog2_device(int n); static void init_ui_gpu(dcomplex u0[], dcomplex u1[], double twiddle[]); __global__ void init_ui_gpu_kernel(dcomplex u0[], dcomplex u1[], double twiddle[]); static void ipow46(double a, int exponent, double* result); __device__ void ipow46_device(double a, int exponent, double* result); static void print_timers(); __device__ double randlc_device(double* x, double a); static void release_gpu(); static void setup(); static void setup_gpu(); static void verify (int d1, int d2, int d3, int nt, boolean* verified, char* class_npb); __device__ void vranlc_device(int n, double* x_seed, double a, double y[]); /* ft */ int main(int argc, char** argv){ #if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION) printf(" DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION mode on\n"); #endif int i; int iter=0; double total_time, mflops; boolean verified; char class_npb; /* * --------------------------------------------------------------------- * run the entire problem once to make sure all data is touched. * this reduces variable startup costs, which is important for such a * short benchmark. the other NPB 2 implementations are similar. * --------------------------------------------------------------------- */ for(i=0; i<T_MAX; i++){ timer_clear(i); } setup(); setup_gpu(); init_ui_gpu(u0_device, u1_device, twiddle_device); #pragma omp parallel { if(omp_get_thread_num()==COMPUTE_INDEXMAP){ compute_indexmap_gpu(twiddle_device); }else if(omp_get_thread_num()==COMPUTE_INITIAL_CONDITIONS){ compute_initial_conditions_gpu(u1_device); }else if(omp_get_thread_num()==COMPUTE_FFT_INIT){ fft_init_gpu(MAXDIM); } }hipDeviceSynchronize(); fft_gpu(1, u1_device, u0_device); /* * --------------------------------------------------------------------- * start over from the beginning. note that all operations must * be timed, in contrast to other benchmarks. 
* --------------------------------------------------------------------- */ for(i=0; i<T_MAX; i++){ timer_clear(i); } timer_start(T_TOTAL); if(timers_enabled==TRUE){timer_start(T_SETUP);} #pragma omp parallel { if(omp_get_thread_num()==COMPUTE_INDEXMAP){ compute_indexmap_gpu(twiddle_device); }else if(omp_get_thread_num()==COMPUTE_INITIAL_CONDITIONS){ compute_initial_conditions_gpu(u1_device); }else if(omp_get_thread_num()==COMPUTE_FFT_INIT){ fft_init_gpu(MAXDIM); } }hipDeviceSynchronize(); if(timers_enabled==TRUE){timer_stop(T_SETUP);} if(timers_enabled==TRUE){timer_start(T_FFT);} fft_gpu(1, u1_device, u0_device); if(timers_enabled==TRUE){timer_stop(T_FFT);} for(iter=1; iter<=niter; iter++){ if(timers_enabled==TRUE){timer_start(T_EVOLVE);} evolve_gpu(u0_device, u1_device, twiddle_device); if(timers_enabled==TRUE){timer_stop(T_EVOLVE);} if(timers_enabled==TRUE){timer_start(T_FFT);} fft_gpu(-1, u1_device, u1_device); if(timers_enabled==TRUE){timer_stop(T_FFT);} if(timers_enabled==TRUE){timer_start(T_CHECKSUM);} checksum_gpu(iter, u1_device); if(timers_enabled==TRUE){timer_stop(T_CHECKSUM);} } hipMemcpy(sums, sums_device, size_sums_device, hipMemcpyDeviceToHost); for(iter=1; iter<=niter; iter++){ printf("T = %5d Checksum = %22.12e %22.12e\n", iter, sums[iter].real, sums[iter].imag); } verify(NX, NY, NZ, niter, &verified, &class_npb); timer_stop(T_TOTAL); total_time = timer_read(T_TOTAL); if(total_time != 0.0){ mflops = 1.0e-6 * ((double)(NTOTAL)) * (14.8157 + 7.19641 * log((double)(NTOTAL)) + (5.23518 + 7.21113 * log((double)(NTOTAL)))*niter) / total_time; }else{ mflops = 0.0; } c_print_results((char*)"FT", class_npb, NX, NY, NZ, niter, total_time, mflops, (char*)" floating point", verified, (char*)NPBVERSION, (char*)COMPILETIME, (char*)CS1, (char*)CS2, (char*)CS3, (char*)CS4, (char*)CS5, (char*)CS6, (char*)CS7); if(timers_enabled==TRUE){print_timers();} release_gpu(); return 0; } static void cffts1_gpu(const int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]){ if(timers_enabled){timer_start(T_FFTX);} int blocks_per_grid_kernel_1=ceil(double(NX*NY*NZ)/double(THREADS_PER_BLOCK_AT_FFT1)); int blocks_per_grid_kernel_2=ceil(double(NY*NZ)/double(THREADS_PER_BLOCK_AT_FFT1)); int blocks_per_grid_kernel_3=ceil(double(NX*NY*NZ)/double(THREADS_PER_BLOCK_AT_FFT1)); hipLaunchKernelGGL(( cffts1_gpu_kernel_1), dim3(blocks_per_grid_kernel_1), dim3(THREADS_PER_BLOCK_AT_FFT1), 0, 0, x_in, y0); hipDeviceSynchronize(); hipLaunchKernelGGL(( cffts1_gpu_kernel_2), dim3(blocks_per_grid_kernel_2), dim3(THREADS_PER_BLOCK_AT_FFT1), 0, 0, is, y0, y1, u); hipDeviceSynchronize(); hipLaunchKernelGGL(( cffts1_gpu_kernel_3), dim3(blocks_per_grid_kernel_3), dim3(THREADS_PER_BLOCK_AT_FFT1), 0, 0, x_out, y0); hipDeviceSynchronize(); if(timers_enabled){timer_stop(T_FFTX);} } /* * ---------------------------------------------------------------------- * y0[z][x][y] = x_in[z][y][x] * * y0[y + x*NY + z*NX*NY] = x_in[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts1_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } int x = x_y_z % NX; int y = (x_y_z / NX) % NY; int z = x_y_z / (NX * NY); y0[y+(x*NY)+(z*NX*NY)].real = x_in[x_y_z].real; y0[y+(x*NY)+(z*NX*NY)].imag = x_in[x_y_z].imag; } /* * ---------------------------------------------------------------------- * pattern = j + variable*NY + k*NX*NY | variable is i and transforms x axis * 
---------------------------------------------------------------------- */ __global__ void cffts1_gpu_kernel_2(const int is, dcomplex gty1[], dcomplex gty2[], dcomplex u_device[]){ int y_z = blockIdx.x * blockDim.x + threadIdx.x; if(y_z >= (NY*NZ)){ return; } int j, k; int l, j1, i1, k1; int n1, li, lj, lk, ku, i11, i12, i21, i22; j = y_z % NY; /* j = y */ k = (y_z / NY) % NZ; /* k = z */ const int logd1 = ilog2_device(NX); double uu1_real, x11_real, x21_real; double uu1_imag, x11_imag, x21_imag; double uu2_real, x12_real, x22_real; double uu2_imag, x12_imag, x22_imag; double temp_real, temp2_real; double temp_imag, temp2_imag; for(l=1; l<=logd1; l+=2){ n1 = NX / 2; lk = 1 << (l - 1); li = 1 << (logd1 - l); lj = 2 * lk; ku = li; for(i1=0; i1<=li-1; i1++){ for(k1=0; k1<=lk-1; k1++){ i11 = i1 * lk; i12 = i11 + n1; i21 = i1 * lj; i22 = i21 + lk; uu1_real = u_device[ku+i1].real; uu1_imag = is*u_device[ku+i1].imag; /* gty1[k][i11+k1][j] */ x11_real = gty1[j + (i11+k1)*NY + k*NX*NY].real; x11_imag = gty1[j + (i11+k1)*NY + k*NX*NY].imag; /* gty1[k][i12+k1][j] */ x21_real = gty1[j + (i12+k1)*NY + k*NX*NY].real; x21_imag = gty1[j + (i12+k1)*NY + k*NX*NY].imag; /* gty2[k][i21+k1][j] */ gty2[j + (i21+k1)*NY + k*NX*NY].real = x11_real + x21_real; gty2[j + (i21+k1)*NY + k*NX*NY].imag = x11_imag + x21_imag; temp_real = x11_real - x21_real; temp_imag = x11_imag - x21_imag; /* gty2[k][i22+k1][j] */ gty2[j + (i22+k1)*NY + k*NX*NY].real = (uu1_real)*(temp_real) - (uu1_imag)*(temp_imag); gty2[j + (i22+k1)*NY + k*NX*NY].imag = (uu1_real)*(temp_imag) + (uu1_imag)*(temp_real); } } if(l==logd1){ for(j1=0; j1<NX; j1++){ /* gty1[k][j1][j] */ gty1[j + j1*NY + k*NX*NY].real = gty2[j + j1*NY + k*NX*NY].real; gty1[j + j1*NY + k*NX*NY].imag = gty2[j + j1*NY + k*NX*NY].imag; } }else{ n1 = NX / 2; lk = 1 << (l+1 - 1); li = 1 << (logd1 - (l+1)); lj = 2 * lk; ku = li; for(i1=0; i1<=li-1; i1++){ for(k1=0; k1<=lk-1; k1++){ i11 = i1 * lk; i12 = i11 + n1; i21 = i1 * lj; i22 = i21 + lk; uu2_real = u_device[ku+i1].real; uu2_imag = is*u_device[ku+i1].imag; /* gty2[k][i11+k1][j] */ x12_real = gty2[j + (i11+k1)*NY + k*NX*NY].real; x12_imag = gty2[j + (i11+k1)*NY + k*NX*NY].imag; /* gty2[k][i12+k1][j] */ x22_real = gty2[j + (i12+k1)*NY + k*NX*NY].real; x22_imag = gty2[j + (i12+k1)*NY + k*NX*NY].imag; /* gty2[k][i21+k1][j] */ gty1[j + (i21+k1)*NY + k*NX*NY].real = x12_real + x22_real; gty1[j + (i21+k1)*NY + k*NX*NY].imag = x12_imag + x22_imag; temp2_real = x12_real - x22_real; temp2_imag = x12_imag - x22_imag; /* gty1[k][i22+k1][j] */ gty1[j + (i22+k1)*NY + k*NX*NY].real = (uu2_real)*(temp2_real) - (uu2_imag)*(temp2_imag); gty1[j + (i22+k1)*NY + k*NX*NY].imag = (uu2_real)*(temp2_imag) + (uu2_imag)*(temp2_real); } } } } } /* * ---------------------------------------------------------------------- * x_out[z][y][x] = y0[z][x][y] * * x_out[x + y*NX + z*NX*NY] = y0[y + x*NY + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts1_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } int x = x_y_z % NX; int y = (x_y_z / NX) % NY; int z = x_y_z / (NX * NY); x_out[x_y_z].real = y0[y+(x*NY)+(z*NX*NY)].real; x_out[x_y_z].imag = y0[y+(x*NY)+(z*NX*NY)].imag; } static void cffts2_gpu(int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]){ if(timers_enabled){timer_start(T_FFTY);} int blocks_per_grid_kernel_1=ceil(double(NX*NY*NZ)/double(THREADS_PER_BLOCK_AT_FFT2)); int 
blocks_per_grid_kernel_2=ceil(double(NX*NZ)/double(THREADS_PER_BLOCK_AT_FFT2)); int blocks_per_grid_kernel_3=ceil(double(NX*NY*NZ)/double(THREADS_PER_BLOCK_AT_FFT2)); hipLaunchKernelGGL(( cffts2_gpu_kernel_1), dim3(blocks_per_grid_kernel_1), dim3(THREADS_PER_BLOCK_AT_FFT2), 0, 0, x_in, y0); hipDeviceSynchronize(); hipLaunchKernelGGL(( cffts2_gpu_kernel_2), dim3(blocks_per_grid_kernel_2), dim3(THREADS_PER_BLOCK_AT_FFT2), 0, 0, is, y0, y1, u); hipDeviceSynchronize(); hipLaunchKernelGGL(( cffts2_gpu_kernel_3), dim3(blocks_per_grid_kernel_3), dim3(THREADS_PER_BLOCK_AT_FFT2), 0, 0, x_out, y0); hipDeviceSynchronize(); if(timers_enabled){timer_stop(T_FFTY);} } /* * ---------------------------------------------------------------------- * y0[z][y][x] = x_in[z][y][x] * * y0[x + y*NX + z*NX*NY] = x_in[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts2_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } y0[x_y_z].real = x_in[x_y_z].real; y0[x_y_z].imag = x_in[x_y_z].imag; } /* * ---------------------------------------------------------------------- * pattern = i + variable*NX + k*NX*NY | variable is j and transforms y axis * ---------------------------------------------------------------------- */ __global__ void cffts2_gpu_kernel_2(const int is, dcomplex gty1[], dcomplex gty2[], dcomplex u_device[]){ int x_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_z >= (NX*NZ)){ return; } int i, k; int l, j1, i1, k1; int n1, li, lj, lk, ku, i11, i12, i21, i22; i = x_z % NX; /* i = x */ k = (x_z / NX) % NZ; /* k = z */ const int logd2 = ilog2_device(NY); double uu1_real, x11_real, x21_real; double uu1_imag, x11_imag, x21_imag; double uu2_real, x12_real, x22_real; double uu2_imag, x12_imag, x22_imag; double temp_real, temp2_real; double temp_imag, temp2_imag; for(l=1; l<=logd2; l+=2){ n1 = NY / 2; lk = 1 << (l - 1); li = 1 << (logd2 - l); lj = 2 * lk; ku = li; for(i1=0; i1<=li-1; i1++){ for(k1=0; k1<=lk-1; k1++){ i11 = i1 * lk; i12 = i11 + n1; i21 = i1 * lj; i22 = i21 + lk; uu1_real = u_device[ku+i1].real; uu1_imag = is*u_device[ku+i1].imag; /* gty1[k][i11+k1][i] */ x11_real = gty1[i + (i11+k1)*NX + k*NX*NY].real; x11_imag = gty1[i + (i11+k1)*NX + k*NX*NY].imag; /* gty1[k][i12+k1][i] */ x21_real = gty1[i + (i12+k1)*NX + k*NX*NY].real; x21_imag = gty1[i + (i12+k1)*NX + k*NX*NY].imag; /* gty2[k][i21+k1][i] */ gty2[i + (i21+k1)*NX + k*NX*NY].real = x11_real + x21_real; gty2[i + (i21+k1)*NX + k*NX*NY].imag = x11_imag + x21_imag; temp_real = x11_real - x21_real; temp_imag = x11_imag - x21_imag; /* gty2[k][i22+k1][i] */ gty2[i + (i22+k1)*NX + k*NX*NY].real = (uu1_real)*(temp_real) - (uu1_imag)*(temp_imag); gty2[i + (i22+k1)*NX + k*NX*NY].imag = (uu1_real)*(temp_imag) + (uu1_imag)*(temp_real); } } if(l==logd2){ for(j1=0; j1<NY; j1++){ /* gty1[k][j1][i] */ gty1[i + j1*NX + k*NX*NY].real = gty2[i + j1*NX + k*NX*NY].real; gty1[i + j1*NX + k*NX*NY].imag = gty2[i + j1*NX + k*NX*NY].imag; } } else{ n1 = NY / 2; lk = 1 << (l+1 - 1); li = 1 << (logd2 - (l+1)); lj = 2 * lk; ku = li; for(i1=0; i1<=li-1; i1++){ for(k1=0; k1<=lk-1; k1++){ i11 = i1 * lk; i12 = i11 + n1; i21 = i1 * lj; i22 = i21 + lk; uu2_real = u_device[ku+i1].real; uu2_imag = is*u_device[ku+i1].imag; /* gty2[k][i11+k1][i] */ x12_real = gty2[i + (i11+k1)*NX + k*NX*NY].real; x12_imag = gty2[i + (i11+k1)*NX + k*NX*NY].imag; /* gty2[k][i12+k1][i] */ x22_real = gty2[i + (i12+k1)*NX + k*NX*NY].real; x22_imag = gty2[i + 
(i12+k1)*NX + k*NX*NY].imag; /* gty1[k][i21+k1][i] */ gty1[i + (i21+k1)*NX + k*NX*NY].real = x12_real + x22_real; gty1[i + (i21+k1)*NX + k*NX*NY].imag = x12_imag + x22_imag; temp2_real = x12_real - x22_real; temp2_imag = x12_imag - x22_imag; /* gty1[k][i22+k1][i] */ gty1[i + (i22+k1)*NX + k*NX*NY].real = (uu2_real)*(temp2_real) - (uu2_imag)*(temp2_imag); gty1[i + (i22+k1)*NX + k*NX*NY].imag = (uu2_real)*(temp2_imag) + (uu2_imag)*(temp2_real); } } } } } /* * ---------------------------------------------------------------------- * x_out[z][y][x] = y0[z][y][x] * * x_out[x + y*NX + z*NX*NY] = y0[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts2_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } x_out[x_y_z].real = y0[x_y_z].real; x_out[x_y_z].imag = y0[x_y_z].imag; } static void cffts3_gpu(int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]){ if(timers_enabled){timer_start(T_FFTZ);} int blocks_per_grid_kernel_1=ceil(double(NX*NY*NZ)/double(THREADS_PER_BLOCK_AT_FFT3)); int blocks_per_grid_kernel_2=ceil(double(NX*NY)/double(THREADS_PER_BLOCK_AT_FFT3)); int blocks_per_grid_kernel_3=ceil(double(NX*NY*NZ)/double(THREADS_PER_BLOCK_AT_FFT3)); hipLaunchKernelGGL(( cffts3_gpu_kernel_1), dim3(blocks_per_grid_kernel_1), dim3(THREADS_PER_BLOCK_AT_FFT3), 0, 0, x_in, y0); hipDeviceSynchronize(); hipLaunchKernelGGL(( cffts3_gpu_kernel_2), dim3(blocks_per_grid_kernel_2), dim3(THREADS_PER_BLOCK_AT_FFT3), 0, 0, is, y0, y1, u); hipDeviceSynchronize(); hipLaunchKernelGGL(( cffts3_gpu_kernel_3), dim3(blocks_per_grid_kernel_3), dim3(THREADS_PER_BLOCK_AT_FFT3), 0, 0, x_out, y0); hipDeviceSynchronize(); if(timers_enabled){timer_stop(T_FFTZ);} } /* * ---------------------------------------------------------------------- * pattern = i + j*NX + variable*NX*NY | variable is z and transforms z axis * * index_arg = i + j*NX * * size_arg = NX*NY * ---------------------------------------------------------------------- */ __device__ void cffts3_gpu_cfftz_device(const int is, int m, int n, dcomplex x[], dcomplex y[], dcomplex u_device[], int index_arg, int size_arg){ int j,l; /* * --------------------------------------------------------------------- * perform one variant of the Stockham FFT. * --------------------------------------------------------------------- */ for(l=1; l<=m; l+=2){ cffts3_gpu_fftz2_device(is, l, m, n, u_device, x, y, index_arg, size_arg); if(l==m){break;} cffts3_gpu_fftz2_device(is, l + 1, m, n, u_device, y, x, index_arg, size_arg); } /* * --------------------------------------------------------------------- * copy Y to X. 
* --------------------------------------------------------------------- */ if(m%2==1){ for(j=0; j<n; j++){ x[j*size_arg+index_arg].real = y[j*size_arg+index_arg].real; x[j*size_arg+index_arg].imag = y[j*size_arg+index_arg].imag; } } } /* * ---------------------------------------------------------------------- * pattern = i + j*NX + variable*NX*NY | variable is z and transforms z axis * * index_arg = i + j*NX * * size_arg = NX*NY * ---------------------------------------------------------------------- */ __device__ void cffts3_gpu_fftz2_device(const int is, int l, int m, int n, dcomplex u[], dcomplex x[], dcomplex y[], int index_arg, int size_arg){ int k,n1,li,lj,lk,ku,i,i11,i12,i21,i22; double x11real, x11imag; double x21real, x21imag; dcomplex u1; /* * --------------------------------------------------------------------- * set initial parameters. * --------------------------------------------------------------------- */ n1 = n / 2; lk = 1 << (l - 1); li = 1 << (m - l); lj = 2 * lk; ku = li; for(i=0; i<li; i++){ i11 = i * lk; i12 = i11 + n1; i21 = i * lj; i22 = i21 + lk; if(is>=1){ u1.real = u[ku+i].real; u1.imag = u[ku+i].imag; }else{ u1.real = u[ku+i].real; u1.imag = -u[ku+i].imag; } for(k=0; k<lk; k++){ x11real = x[(i11+k)*size_arg+index_arg].real; x11imag = x[(i11+k)*size_arg+index_arg].imag; x21real = x[(i12+k)*size_arg+index_arg].real; x21imag = x[(i12+k)*size_arg+index_arg].imag; y[(i21+k)*size_arg+index_arg].real = x11real + x21real; y[(i21+k)*size_arg+index_arg].imag = x11imag + x21imag; y[(i22+k)*size_arg+index_arg].real = u1.real * (x11real - x21real) - u1.imag * (x11imag - x21imag); y[(i22+k)*size_arg+index_arg].imag = u1.real * (x11imag - x21imag) + u1.imag * (x11real - x21real); } } } /* * ---------------------------------------------------------------------- * y0[z][y][x] = x_in[z][y][x] * * y0[x + y*NX + z*NX*NY] = x_in[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts3_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } y0[x_y_z].real = x_in[x_y_z].real; y0[x_y_z].imag = x_in[x_y_z].imag; } /* * ---------------------------------------------------------------------- * pattern = i + j*NX + variable*NX*NY | variable is z and transforms z axis * ---------------------------------------------------------------------- */ __global__ void cffts3_gpu_kernel_2(const int is, dcomplex gty1[], dcomplex gty2[], dcomplex u_device[]){ int x_y = blockIdx.x * blockDim.x + threadIdx.x; if(x_y >= (NX*NY)){ return; } cffts3_gpu_cfftz_device(is, ilog2_device(NZ), NZ, gty1 , gty2, u_device, x_y /* index_arg */, NX*NY /* size_arg */); } /* * ---------------------------------------------------------------------- * x_out[z][y][x] = y0[z][y][x] * * x_out[x + y*NX + z*NX*NY] = y0[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts3_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } x_out[x_y_z].real = y0[x_y_z].real; x_out[x_y_z].imag = y0[x_y_z].imag; } static void checksum_gpu(int iteration, dcomplex u1[]){ int blocks_per_grid=ceil(double(CHECKSUM_TASKS)/double(THREADS_PER_BLOCK_AT_CHECKSUM)); hipLaunchKernelGGL(( checksum_gpu_kernel), dim3(blocks_per_grid), dim3(THREADS_PER_BLOCK_AT_CHECKSUM), 0, 0, iteration, u1, sums_device); hipDeviceSynchronize(); } __global__ void checksum_gpu_kernel(int iteration, 
dcomplex u1[], dcomplex sums[]){ __shared__ dcomplex share_sums[THREADS_PER_BLOCK_AT_CHECKSUM]; int j = (blockIdx.x * blockDim.x + threadIdx.x) + 1; int q, r, s; if(j<=CHECKSUM_TASKS){ q = j % NX; r = 3*j % NY; s = 5*j % NZ; share_sums[threadIdx.x] = u1[ q + r*NX + s*NX*NY ]; }else{ share_sums[threadIdx.x] = dcomplex_create(0.0, 0.0); } __syncthreads(); for(int i=blockDim.x/2; i>0; i>>=1){ if(threadIdx.x<i){ share_sums[threadIdx.x] = dcomplex_add(share_sums[threadIdx.x], share_sums[threadIdx.x+i]); } __syncthreads(); } if(threadIdx.x==0){ share_sums[0].real = share_sums[0].real/(double)(NTOTAL); atomicAdd(&sums[iteration].real,share_sums[0].real); share_sums[0].imag = share_sums[0].imag/(double)(NTOTAL); atomicAdd(&sums[iteration].imag,share_sums[0].imag); } } static void compute_indexmap_gpu(double twiddle[]){ int blocks_per_grid=ceil(double(NTOTAL)/double(THREADS_PER_BLOCK_AT_COMPUTE_INDEXMAP)); hipLaunchKernelGGL(( compute_indexmap_gpu_kernel), dim3(blocks_per_grid), dim3(THREADS_PER_BLOCK_AT_COMPUTE_INDEXMAP), 0, 0, twiddle); } __global__ void compute_indexmap_gpu_kernel(double twiddle[]){ int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id>=NTOTAL){ return; } int i = thread_id % NX; int j = (thread_id / NX) % NY; int k = thread_id / (NX * NY); int kk, kk2, jj, kj2, ii; kk = ((k+NZ/2) % NZ) - NZ/2; kk2 = kk*kk; jj = ((j+NY/2) % NY) - NY/2; kj2 = jj*jj+kk2; ii = ((i+NX/2) % NX) - NX/2; twiddle[thread_id] = exp(AP*(double)(ii*ii+kj2)); } static void compute_initial_conditions_gpu(dcomplex u0[]){ int z; double start, an, starts[NZ]; start = SEED; ipow46(A, 0, &an); randlc(&start, an); ipow46(A, 2*NX*NY, &an); starts[0] = start; for(z=1; z<NZ; z++){ randlc(&start, an); starts[z] = start; } hipMemcpy(starts_device, starts, size_starts_device, hipMemcpyHostToDevice); int blocks_per_grid=ceil(double(NZ)/double(THREADS_PER_BLOCK_AT_COMPUTE_INITIAL_CONDITIONS)); hipLaunchKernelGGL(( compute_initial_conditions_gpu_kernel), dim3(blocks_per_grid), dim3(THREADS_PER_BLOCK_AT_COMPUTE_INITIAL_CONDITIONS), 0, 0, u0, starts_device); } __global__ void compute_initial_conditions_gpu_kernel(dcomplex u0[], double starts[]){ int z = blockIdx.x * blockDim.x + threadIdx.x; if(z>=NZ){return;} double x0 = starts[z]; for(int y=0; y<NY; y++){ vranlc_device(2*NX, &x0, A, (double*)&u0[ 0 + y*NX + z*NX*NY ]); } } static void evolve_gpu(dcomplex u0[], dcomplex u1[], double twiddle[]){ int blocks_per_grid=ceil(double(NTOTAL)/double(THREADS_PER_BLOCK_AT_EVOLVE)); hipLaunchKernelGGL(( evolve_gpu_kernel), dim3(blocks_per_grid), dim3(THREADS_PER_BLOCK_AT_EVOLVE), 0, 0, u0, u1, twiddle); hipDeviceSynchronize(); } __global__ void evolve_gpu_kernel(dcomplex u0[], dcomplex u1[], double twiddle[]){ int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id>=(NZ*NY*NX)){ return; } u0[thread_id] = dcomplex_mul2(u0[thread_id], twiddle[thread_id]); u1[thread_id] = u0[thread_id]; } static void fft_gpu(int dir, dcomplex x1[], dcomplex x2[]){ /* * --------------------------------------------------------------------- * note: args x1, x2 must be different arrays * note: args for cfftsx are (direction, layout, xin, xout, scratch) * xin/xout may be the same and it can be somewhat faster * if they are * --------------------------------------------------------------------- */ if(dir==1){ cffts1_gpu(1, u_device, x1, x1, y0_device, y1_device); cffts2_gpu(1, u_device, x1, x1, y0_device, y1_device); cffts3_gpu(1, u_device, x1, x2, y0_device, y1_device); }else{ cffts3_gpu(-1, u_device, x1, x1, y0_device, y1_device); 
cffts2_gpu(-1, u_device, x1, x1, y0_device, y1_device); cffts1_gpu(-1, u_device, x1, x2, y0_device, y1_device); } } static void fft_init_gpu(int n){ int m,ku,i,j,ln; double t, ti; /* * --------------------------------------------------------------------- * initialize the U array with sines and cosines in a manner that permits * stride one access at each FFT iteration. * --------------------------------------------------------------------- */ m = ilog2(n); u[0] = dcomplex_create((double)m, 0.0); ku = 2; ln = 1; for(j=1; j<=m; j++){ t = PI / ln; for(i=0; i<=ln-1; i++){ ti = i * t; u[i+ku-1] = dcomplex_create(cos(ti), sin(ti)); } ku = ku + ln; ln = 2 * ln; } hipMemcpy(u_device, u, size_u_device, hipMemcpyHostToDevice); } static int ilog2(int n){ int nn, lg; if(n==1){ return 0; } lg = 1; nn = 2; while(nn<n){ nn = nn << 1; lg++; } return lg; } __device__ int ilog2_device(int n){ int nn, lg; if(n==1){ return 0; } lg = 1; nn = 2; while(nn<n){ nn = nn << 1; lg++; } return lg; } static void init_ui_gpu(dcomplex u0[], dcomplex u1[], double twiddle[]){ int blocks_per_grid=ceil(double(NTOTAL)/double(THREADS_PER_BLOCK_AT_INIT_UI)); hipLaunchKernelGGL(( init_ui_gpu_kernel), dim3(blocks_per_grid), dim3(THREADS_PER_BLOCK_AT_EVOLVE), 0, 0, u0, u1, twiddle); hipDeviceSynchronize(); } __global__ void init_ui_gpu_kernel(dcomplex u0[], dcomplex u1[], double twiddle[]){ int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id>=NTOTAL){ return; } u0[thread_id] = dcomplex_create(0.0, 0.0); u1[thread_id] = dcomplex_create(0.0, 0.0); twiddle[thread_id] = 0.0; } static void ipow46(double a, int exponent, double* result){ double q, r; int n, n2; /* * -------------------------------------------------------------------- * use * a^n = a^(n/2)*a^(n/2) if n even else * a^n = a*a^(n-1) if n odd * ------------------------------------------------------------------- */ *result = 1; if(exponent==0){return;} q = a; r = 1; n = exponent; while(n>1){ n2 = n/2; if(n2*2==n){ randlc(&q, q); n = n2; }else{ randlc(&r, q); n = n-1; } } randlc(&r, q); *result = r; } __device__ void ipow46_device(double a, int exponent, double* result){ double q, r; int n, n2; /* * -------------------------------------------------------------------- * use * a^n = a^(n/2)*a^(n/2) if n even else * a^n = a*a^(n-1) if n odd * ------------------------------------------------------------------- */ *result = 1; if(exponent==0){return;} q = a; r = 1; n = exponent; while(n>1){ n2 = n/2; if(n2*2==n){ randlc_device(&q, q); n = n2; }else{ randlc_device(&r, q); n = n-1; } } randlc_device(&r, q); *result = r; } static void print_timers(){ int i; double t, t_m; char* tstrings[T_MAX+1]; tstrings[1] = (char*)" total "; tstrings[2] = (char*)" setup "; tstrings[3] = (char*)" fft "; tstrings[4] = (char*)" evolve "; tstrings[5] = (char*)" checksum "; tstrings[6] = (char*)" fftx "; tstrings[7] = (char*)" ffty "; tstrings[8] = (char*)" fftz "; t_m = timer_read(T_TOTAL); if(t_m <= 0.0){t_m = 1.00;} for(i = 1; i <= T_MAX; i++){ t = timer_read(i); printf(" timer %2d(%16s) :%9.4f (%6.2f%%)\n", i, tstrings[i], t, t*100.0/t_m); } } __device__ double randlc_device(double* x, double a){ double t1,t2,t3,t4,a1,a2,x1,x2,z; t1 = R23 * a; a1 = (int)t1; a2 = a - T23 * a1; t1 = R23 * (*x); x1 = (int)t1; x2 = (*x) - T23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int)(R23 * t1); z = t1 - T23 * t2; t3 = T23 * z + a2 * x2; t4 = (int)(R46 * t3); (*x) = t3 - T46 * t4; return (R46 * (*x)); } static void release_gpu(){ hipFree(sums_device); hipFree(starts_device); hipFree(twiddle_device); 
hipFree(u_device); hipFree(u0_device); hipFree(u1_device); hipFree(y0_device); hipFree(y1_device); } static void setup(){ FILE* fp; if((fp = fopen("timer.flag", "r")) != NULL){ timers_enabled = TRUE; fclose(fp); }else{ timers_enabled = FALSE; } niter = NITER_DEFAULT; printf("\n\n NAS Parallel Benchmarks 4.1 CUDA C++ version - FT Benchmark\n\n"); printf(" Size : %4dx%4dx%4d\n", NX, NY, NZ); printf(" Iterations :%7d\n", niter); printf("\n"); } static void setup_gpu(){ hipDeviceProp_t deviceProp; hipSetDevice(DEFAULT_GPU); hipGetDeviceProperties(&deviceProp, DEFAULT_GPU); THREADS_PER_BLOCK_AT_COMPUTE_INDEXMAP = deviceProp.maxThreadsPerBlock; THREADS_PER_BLOCK_AT_COMPUTE_INITIAL_CONDITIONS = 128; THREADS_PER_BLOCK_AT_INIT_UI = deviceProp.maxThreadsPerBlock; THREADS_PER_BLOCK_AT_EVOLVE = deviceProp.maxThreadsPerBlock; THREADS_PER_BLOCK_AT_FFT1 = deviceProp.maxThreadsPerBlock; THREADS_PER_BLOCK_AT_FFT2 = deviceProp.maxThreadsPerBlock; THREADS_PER_BLOCK_AT_FFT3 = deviceProp.maxThreadsPerBlock; size_sums_device=sizeof(dcomplex)*(NITER_DEFAULT+1); size_starts_device=sizeof(double)*(NZ); size_twiddle_device=sizeof(double)*(NTOTAL); size_u_device=sizeof(dcomplex)*(MAXDIM); size_u0_device=sizeof(dcomplex)*(NTOTAL); size_u1_device=sizeof(dcomplex)*(NTOTAL); size_y0_device=sizeof(dcomplex)*(NTOTAL); size_y1_device=sizeof(dcomplex)*(NTOTAL); hipMalloc(&sums_device, size_sums_device); hipMalloc(&starts_device, size_starts_device); hipMalloc(&twiddle_device, size_twiddle_device); hipMalloc(&u_device, size_u_device); hipMalloc(&u0_device, size_u0_device); hipMalloc(&u1_device, size_u1_device); hipMalloc(&y0_device, size_y0_device); hipMalloc(&y1_device, size_y1_device); omp_set_num_threads(OMP_THREADS); } static void verify(int d1, int d2, int d3, int nt, boolean* verified, char* class_npb){ int i; double err, epsilon; /* * --------------------------------------------------------------------- * reference checksums * --------------------------------------------------------------------- */ dcomplex csum_ref[25+1]; *class_npb = 'U'; epsilon = 1.0e-12; *verified = false; if(d1 == 64 && d2 == 64 && d3 == 64 && nt == 6){ /* * --------------------------------------------------------------------- * sample size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'S'; csum_ref[1] = dcomplex_create(5.546087004964E+02, 4.845363331978E+02); csum_ref[2] = dcomplex_create(5.546385409189E+02, 4.865304269511E+02); csum_ref[3] = dcomplex_create(5.546148406171E+02, 4.883910722336E+02); csum_ref[4] = dcomplex_create(5.545423607415E+02, 4.901273169046E+02); csum_ref[5] = dcomplex_create(5.544255039624E+02, 4.917475857993E+02); csum_ref[6] = dcomplex_create(5.542683411902E+02, 4.932597244941E+02); }else if(d1 == 128 && d2 == 128 && d3 == 32 && nt == 6){ /* * --------------------------------------------------------------------- * class_npb W size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'W'; csum_ref[1] = dcomplex_create(5.673612178944E+02, 5.293246849175E+02); csum_ref[2] = dcomplex_create(5.631436885271E+02, 5.282149986629E+02); csum_ref[3] = dcomplex_create(5.594024089970E+02, 5.270996558037E+02); csum_ref[4] = dcomplex_create(5.560698047020E+02, 5.260027904925E+02); csum_ref[5] = dcomplex_create(5.530898991250E+02, 5.249400845633E+02); csum_ref[6] = dcomplex_create(5.504159734538E+02, 5.239212247086E+02); }else if(d1 == 256 && d2 == 256 && d3 == 128 && nt == 6){ /* * 
--------------------------------------------------------------------- * class_npb A size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'A'; csum_ref[1] = dcomplex_create(5.046735008193E+02, 5.114047905510E+02); csum_ref[2] = dcomplex_create(5.059412319734E+02, 5.098809666433E+02); csum_ref[3] = dcomplex_create(5.069376896287E+02, 5.098144042213E+02); csum_ref[4] = dcomplex_create(5.077892868474E+02, 5.101336130759E+02); csum_ref[5] = dcomplex_create(5.085233095391E+02, 5.104914655194E+02); csum_ref[6] = dcomplex_create(5.091487099959E+02, 5.107917842803E+02); }else if(d1 == 512 && d2 == 256 && d3 == 256 && nt == 20){ /* * -------------------------------------------------------------------- * class_npb B size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'B'; csum_ref[1] = dcomplex_create(5.177643571579E+02, 5.077803458597E+02); csum_ref[2] = dcomplex_create(5.154521291263E+02, 5.088249431599E+02); csum_ref[3] = dcomplex_create(5.146409228649E+02, 5.096208912659E+02); csum_ref[4] = dcomplex_create(5.142378756213E+02, 5.101023387619E+02); csum_ref[5] = dcomplex_create(5.139626667737E+02, 5.103976610617E+02); csum_ref[6] = dcomplex_create(5.137423460082E+02, 5.105948019802E+02); csum_ref[7] = dcomplex_create(5.135547056878E+02, 5.107404165783E+02); csum_ref[8] = dcomplex_create(5.133910925466E+02, 5.108576573661E+02); csum_ref[9] = dcomplex_create(5.132470705390E+02, 5.109577278523E+02); csum_ref[10] = dcomplex_create(5.131197729984E+02, 5.110460304483E+02); csum_ref[11] = dcomplex_create(5.130070319283E+02, 5.111252433800E+02); csum_ref[12] = dcomplex_create(5.129070537032E+02, 5.111968077718E+02); csum_ref[13] = dcomplex_create(5.128182883502E+02, 5.112616233064E+02); csum_ref[14] = dcomplex_create(5.127393733383E+02, 5.113203605551E+02); csum_ref[15] = dcomplex_create(5.126691062020E+02, 5.113735928093E+02); csum_ref[16] = dcomplex_create(5.126064276004E+02, 5.114218460548E+02); csum_ref[17] = dcomplex_create(5.125504076570E+02, 5.114656139760E+02); csum_ref[18] = dcomplex_create(5.125002331720E+02, 5.115053595966E+02); csum_ref[19] = dcomplex_create(5.124551951846E+02, 5.115415130407E+02); csum_ref[20] = dcomplex_create(5.124146770029E+02, 5.115744692211E+02); }else if(d1 == 512 && d2 == 512 && d3 == 512 && nt == 20){ /* * --------------------------------------------------------------------- * class_npb C size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'C'; csum_ref[1] = dcomplex_create(5.195078707457E+02, 5.149019699238E+02); csum_ref[2] = dcomplex_create(5.155422171134E+02, 5.127578201997E+02); csum_ref[3] = dcomplex_create(5.144678022222E+02, 5.122251847514E+02); csum_ref[4] = dcomplex_create(5.140150594328E+02, 5.121090289018E+02); csum_ref[5] = dcomplex_create(5.137550426810E+02, 5.121143685824E+02); csum_ref[6] = dcomplex_create(5.135811056728E+02, 5.121496764568E+02); csum_ref[7] = dcomplex_create(5.134569343165E+02, 5.121870921893E+02); csum_ref[8] = dcomplex_create(5.133651975661E+02, 5.122193250322E+02); csum_ref[9] = dcomplex_create(5.132955192805E+02, 5.122454735794E+02); csum_ref[10] = dcomplex_create(5.132410471738E+02, 5.122663649603E+02); csum_ref[11] = dcomplex_create(5.131971141679E+02, 5.122830879827E+02); csum_ref[12] = dcomplex_create(5.131605205716E+02, 5.122965869718E+02); csum_ref[13] = dcomplex_create(5.131290734194E+02, 5.123075927445E+02); csum_ref[14] = 
dcomplex_create(5.131012720314E+02, 5.123166486553E+02); csum_ref[15] = dcomplex_create(5.130760908195E+02, 5.123241541685E+02); csum_ref[16] = dcomplex_create(5.130528295923E+02, 5.123304037599E+02); csum_ref[17] = dcomplex_create(5.130310107773E+02, 5.123356167976E+02); csum_ref[18] = dcomplex_create(5.130103090133E+02, 5.123399592211E+02); csum_ref[19] = dcomplex_create(5.129905029333E+02, 5.123435588985E+02); csum_ref[20] = dcomplex_create(5.129714421109E+02, 5.123465164008E+02); }else if(d1 == 2048 && d2 == 1024 && d3 == 1024 && nt == 25){ /* * --------------------------------------------------------------------- * class_npb D size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'D'; csum_ref[1] = dcomplex_create(5.122230065252E+02, 5.118534037109E+02); csum_ref[2] = dcomplex_create(5.120463975765E+02, 5.117061181082E+02); csum_ref[3] = dcomplex_create(5.119865766760E+02, 5.117096364601E+02); csum_ref[4] = dcomplex_create(5.119518799488E+02, 5.117373863950E+02); csum_ref[5] = dcomplex_create(5.119269088223E+02, 5.117680347632E+02); csum_ref[6] = dcomplex_create(5.119082416858E+02, 5.117967875532E+02); csum_ref[7] = dcomplex_create(5.118943814638E+02, 5.118225281841E+02); csum_ref[8] = dcomplex_create(5.118842385057E+02, 5.118451629348E+02); csum_ref[9] = dcomplex_create(5.118769435632E+02, 5.118649119387E+02); csum_ref[10] = dcomplex_create(5.118718203448E+02, 5.118820803844E+02); csum_ref[11] = dcomplex_create(5.118683569061E+02, 5.118969781011E+02); csum_ref[12] = dcomplex_create(5.118661708593E+02, 5.119098918835E+02); csum_ref[13] = dcomplex_create(5.118649768950E+02, 5.119210777066E+02); csum_ref[14] = dcomplex_create(5.118645605626E+02, 5.119307604484E+02); csum_ref[15] = dcomplex_create(5.118647586618E+02, 5.119391362671E+02); csum_ref[16] = dcomplex_create(5.118654451572E+02, 5.119463757241E+02); csum_ref[17] = dcomplex_create(5.118665212451E+02, 5.119526269238E+02); csum_ref[18] = dcomplex_create(5.118679083821E+02, 5.119580184108E+02); csum_ref[19] = dcomplex_create(5.118695433664E+02, 5.119626617538E+02); csum_ref[20] = dcomplex_create(5.118713748264E+02, 5.119666538138E+02); csum_ref[21] = dcomplex_create(5.118733606701E+02, 5.119700787219E+02); csum_ref[22] = dcomplex_create(5.118754661974E+02, 5.119730095953E+02); csum_ref[23] = dcomplex_create(5.118776626738E+02, 5.119755100241E+02); csum_ref[24] = dcomplex_create(5.118799262314E+02, 5.119776353561E+02); csum_ref[25] = dcomplex_create(5.118822370068E+02, 5.119794338060E+02); }else if(d1 == 4096 && d2 == 2048 && d3 == 2048 && nt == 25){ /* * --------------------------------------------------------------------- * class_npb E size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'E'; csum_ref[1] = dcomplex_create(5.121601045346E+02, 5.117395998266E+02); csum_ref[2] = dcomplex_create(5.120905403678E+02, 5.118614716182E+02); csum_ref[3] = dcomplex_create(5.120623229306E+02, 5.119074203747E+02); csum_ref[4] = dcomplex_create(5.120438418997E+02, 5.119345900733E+02); csum_ref[5] = dcomplex_create(5.120311521872E+02, 5.119551325550E+02); csum_ref[6] = dcomplex_create(5.120226088809E+02, 5.119720179919E+02); csum_ref[7] = dcomplex_create(5.120169296534E+02, 5.119861371665E+02); csum_ref[8] = dcomplex_create(5.120131225172E+02, 5.119979364402E+02); csum_ref[9] = dcomplex_create(5.120104767108E+02, 5.120077674092E+02); csum_ref[10] = dcomplex_create(5.120085127969E+02, 5.120159443121E+02); csum_ref[11] = 
dcomplex_create(5.120069224127E+02, 5.120227453670E+02); csum_ref[12] = dcomplex_create(5.120055158164E+02, 5.120284096041E+02); csum_ref[13] = dcomplex_create(5.120041820159E+02, 5.120331373793E+02); csum_ref[14] = dcomplex_create(5.120028605402E+02, 5.120370938679E+02); csum_ref[15] = dcomplex_create(5.120015223011E+02, 5.120404138831E+02); csum_ref[16] = dcomplex_create(5.120001570022E+02, 5.120432068837E+02); csum_ref[17] = dcomplex_create(5.119987650555E+02, 5.120455615860E+02); csum_ref[18] = dcomplex_create(5.119973525091E+02, 5.120475499442E+02); csum_ref[19] = dcomplex_create(5.119959279472E+02, 5.120492304629E+02); csum_ref[20] = dcomplex_create(5.119945006558E+02, 5.120506508902E+02); csum_ref[21] = dcomplex_create(5.119930795911E+02, 5.120518503782E+02); csum_ref[22] = dcomplex_create(5.119916728462E+02, 5.120528612016E+02); csum_ref[23] = dcomplex_create(5.119902874185E+02, 5.120537101195E+02); csum_ref[24] = dcomplex_create(5.119889291565E+02, 5.120544194514E+02); csum_ref[25] = dcomplex_create(5.119876028049E+02, 5.120550079284E+02); } if(*class_npb != 'U'){ *verified = TRUE; for(i = 1; i <= nt; i++){ err = dcomplex_abs(dcomplex_div(dcomplex_sub(sums[i], csum_ref[i]), csum_ref[i])); if(!(err <= epsilon)){ *verified = FALSE; break; } } } if(*class_npb != 'U'){ if(*verified){ printf(" Result verification successful\n"); }else{ printf(" Result verification failed\n"); } } printf(" class_npb = %c\n", *class_npb); } __device__ void vranlc_device(int n, double* x_seed, double a, double y[]){ int i; double x,t1,t2,t3,t4,a1,a2,x1,x2,z; t1 = R23 * a; a1 = (int)t1; a2 = a - T23 * a1; x = *x_seed; for(i=0; i<n; i++){ t1 = R23 * x; x1 = (int)t1; x2 = x - T23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int)(R23 * t1); z = t1 - T23 * t2; t3 = T23 * z + a2 * x2; t4 = (int)(R46 * t3); x = t3 - T46 * t4; y[i] = R46 * x; } *x_seed = x; }
abdc125daa05b14f3a03d62f2ee6a9239a5ebdac.cu
/** * NASA Advanced Supercomputing Parallel Benchmarks C++ * * based on NPB 3.3.1 * * original version and technical report: * http://www.nas.nasa.gov/Software/NPB/ * * Authors: * D. Bailey * W. Saphir * * C++ version: * Dalvan Griebler <[email protected]> * Júnior Löff <[email protected]> * Gabriell Araujo <[email protected]> * * CUDA version: * Gabriell Araujo <[email protected]> */ #include <omp.h> #include <cuda.h> #include "../common/npb-CPP.hpp" #include "npbparams.hpp" /* * --------------------------------------------------------------------- * u0, u1, u2 are the main arrays in the problem. * depending on the decomposition, these arrays will have different * dimensions. to accomodate all possibilities, we allocate them as * one-dimensional arrays and pass them to subroutines for different * views * - u0 contains the initial (transformed) initial condition * - u1 and u2 are working arrays * - twiddle contains exponents for the time evolution operator. * --------------------------------------------------------------------- * large arrays are in common so that they are allocated on the * heap rather than the stack. this common block is not * referenced directly anywhere else. padding is to avoid accidental * cache problems, since all array sizes are powers of two. * --------------------------------------------------------------------- * we need a bunch of logic to keep track of how * arrays are laid out. * * note: this serial version is the derived from the parallel 0D case * of the ft NPB. * the computation proceeds logically as * * set up initial conditions * fftx(1) * transpose (1->2) * ffty(2) * transpose (2->3) * fftz(3) * time evolution * fftz(3) * transpose (3->2) * ffty(2) * transpose (2->1) * fftx(1) * compute residual(1) * * for the 0D, 1D, 2D strategies, the layouts look like xxx * * 0D 1D 2D * 1: xyz xyz xyz * 2: xyz xyz yxz * 3: xyz zyx zxy * the array dimensions are stored in dims(coord, phase) * --------------------------------------------------------------------- * if processor array is 1x1 -> 0D grid decomposition * * cache blocking params. these values are good for most * RISC processors. * FFT parameters: * fftblock controls how many ffts are done at a time. * the default is appropriate for most cache-based machines * on vector machines, the FFT can be vectorized with vector * length equal to the block size, so the block size should * be as large as possible. this is the size of the smallest * dimension of the problem: 128 for class A, 256 for class B * and 512 for class C. 
* --------------------------------------------------------------------- */ #define FFTBLOCK_DEFAULT (DEFAULT_BEHAVIOR) #define FFTBLOCKPAD_DEFAULT (DEFAULT_BEHAVIOR) #define FFTBLOCK (FFTBLOCK_DEFAULT) #define FFTBLOCKPAD (FFTBLOCKPAD_DEFAULT) #define SEED (314159265.0) #define A (1220703125.0) #define PI (3.141592653589793238) #define ALPHA (1.0e-6) #define AP (-4.0*ALPHA*PI*PI) #define T_TOTAL (1) #define T_SETUP (2) #define T_FFT (3) #define T_EVOLVE (4) #define T_CHECKSUM (5) #define T_FFTX (6) #define T_FFTY (7) #define T_FFTZ (8) #define T_MAX (8) #define CHECKSUM_TASKS (1024) #define THREADS_PER_BLOCK_AT_CHECKSUM (128) #define DEFAULT_GPU (0) #define OMP_THREADS (3) #define COMPUTE_INDEXMAP (0) #define COMPUTE_INITIAL_CONDITIONS (1) #define COMPUTE_FFT_INIT (2) /* global variables */ #if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION) static dcomplex sums[NITER_DEFAULT+1]; static double twiddle[NTOTAL]; static dcomplex u[MAXDIM]; static dcomplex u0[NTOTAL]; static dcomplex u1[NTOTAL]; static int dims[3]; #else static dcomplex (*sums)=(dcomplex*)malloc(sizeof(dcomplex)*(NITER_DEFAULT+1)); static double (*twiddle)=(double*)malloc(sizeof(double)*(NTOTAL)); static dcomplex (*u)=(dcomplex*)malloc(sizeof(dcomplex)*(MAXDIM)); static dcomplex (*u0)=(dcomplex*)malloc(sizeof(dcomplex)*(NTOTAL)); static dcomplex (*u1)=(dcomplex*)malloc(sizeof(dcomplex)*(NTOTAL)); static int (*dims)=(int*)malloc(sizeof(int)*(3)); #endif static int niter; static boolean timers_enabled; /* gpu variables */ int THREADS_PER_BLOCK_AT_COMPUTE_INDEXMAP; int THREADS_PER_BLOCK_AT_COMPUTE_INITIAL_CONDITIONS; int THREADS_PER_BLOCK_AT_INIT_UI; int THREADS_PER_BLOCK_AT_EVOLVE; int THREADS_PER_BLOCK_AT_FFT1; int THREADS_PER_BLOCK_AT_FFT2; int THREADS_PER_BLOCK_AT_FFT3; dcomplex* sums_device; double* starts_device; double* twiddle_device; dcomplex* u_device; dcomplex* u0_device; dcomplex* u1_device; dcomplex* u2_device; dcomplex* y0_device; dcomplex* y1_device; size_t size_sums_device; size_t size_starts_device; size_t size_twiddle_device; size_t size_u_device; size_t size_u0_device; size_t size_u1_device; size_t size_y0_device; size_t size_y1_device; /* function declarations */ static void cffts1_gpu(const int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]); __global__ void cffts1_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]); __global__ void cffts1_gpu_kernel_2(const int is, dcomplex y0[], dcomplex y1[], dcomplex u_device[]); __global__ void cffts1_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]); static void cffts2_gpu(int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]); __global__ void cffts2_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]); __global__ void cffts2_gpu_kernel_2(const int is, dcomplex y0[], dcomplex y1[], dcomplex u_device[]); __global__ void cffts2_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]); static void cffts3_gpu(int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]); __device__ void cffts3_gpu_cfftz_device(const int is, int m, int n, dcomplex x[], dcomplex y[], dcomplex u_device[], int index_arg, int size_arg); __device__ void cffts3_gpu_fftz2_device(const int is, int l, int m, int n, dcomplex u[], dcomplex x[], dcomplex y[], int index_arg, int size_arg); __global__ void cffts3_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]); __global__ void cffts3_gpu_kernel_2(const int is, dcomplex y0[], dcomplex y1[], dcomplex u_device[]); __global__ void cffts3_gpu_kernel_3(dcomplex x_out[], 
dcomplex y0[]); static void checksum_gpu(int iteration, dcomplex u1[]); __global__ void checksum_gpu_kernel(int iteration, dcomplex u1[], dcomplex sums[]); static void compute_indexmap_gpu(double twiddle[]); __global__ void compute_indexmap_gpu_kernel(double twiddle[]); static void compute_initial_conditions_gpu(dcomplex u0[]); __global__ void compute_initial_conditions_gpu_kernel(dcomplex u0[], double starts[]); static void evolve_gpu(dcomplex u0[], dcomplex u1[], double twiddle[]); __global__ void evolve_gpu_kernel(dcomplex u0[], dcomplex u1[], double twiddle[]); static void fft_gpu(int dir, dcomplex x1[], dcomplex x2[]); static void fft_init_gpu(int n); static int ilog2(int n); __device__ int ilog2_device(int n); static void init_ui_gpu(dcomplex u0[], dcomplex u1[], double twiddle[]); __global__ void init_ui_gpu_kernel(dcomplex u0[], dcomplex u1[], double twiddle[]); static void ipow46(double a, int exponent, double* result); __device__ void ipow46_device(double a, int exponent, double* result); static void print_timers(); __device__ double randlc_device(double* x, double a); static void release_gpu(); static void setup(); static void setup_gpu(); static void verify (int d1, int d2, int d3, int nt, boolean* verified, char* class_npb); __device__ void vranlc_device(int n, double* x_seed, double a, double y[]); /* ft */ int main(int argc, char** argv){ #if defined(DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION) printf(" DO_NOT_ALLOCATE_ARRAYS_WITH_DYNAMIC_MEMORY_AND_AS_SINGLE_DIMENSION mode on\n"); #endif int i; int iter=0; double total_time, mflops; boolean verified; char class_npb; /* * --------------------------------------------------------------------- * run the entire problem once to make sure all data is touched. * this reduces variable startup costs, which is important for such a * short benchmark. the other NPB 2 implementations are similar. * --------------------------------------------------------------------- */ for(i=0; i<T_MAX; i++){ timer_clear(i); } setup(); setup_gpu(); init_ui_gpu(u0_device, u1_device, twiddle_device); #pragma omp parallel { if(omp_get_thread_num()==COMPUTE_INDEXMAP){ compute_indexmap_gpu(twiddle_device); }else if(omp_get_thread_num()==COMPUTE_INITIAL_CONDITIONS){ compute_initial_conditions_gpu(u1_device); }else if(omp_get_thread_num()==COMPUTE_FFT_INIT){ fft_init_gpu(MAXDIM); } }cudaDeviceSynchronize(); fft_gpu(1, u1_device, u0_device); /* * --------------------------------------------------------------------- * start over from the beginning. note that all operations must * be timed, in contrast to other benchmarks. 
* --------------------------------------------------------------------- */ for(i=0; i<T_MAX; i++){ timer_clear(i); } timer_start(T_TOTAL); if(timers_enabled==TRUE){timer_start(T_SETUP);} #pragma omp parallel { if(omp_get_thread_num()==COMPUTE_INDEXMAP){ compute_indexmap_gpu(twiddle_device); }else if(omp_get_thread_num()==COMPUTE_INITIAL_CONDITIONS){ compute_initial_conditions_gpu(u1_device); }else if(omp_get_thread_num()==COMPUTE_FFT_INIT){ fft_init_gpu(MAXDIM); } }cudaDeviceSynchronize(); if(timers_enabled==TRUE){timer_stop(T_SETUP);} if(timers_enabled==TRUE){timer_start(T_FFT);} fft_gpu(1, u1_device, u0_device); if(timers_enabled==TRUE){timer_stop(T_FFT);} for(iter=1; iter<=niter; iter++){ if(timers_enabled==TRUE){timer_start(T_EVOLVE);} evolve_gpu(u0_device, u1_device, twiddle_device); if(timers_enabled==TRUE){timer_stop(T_EVOLVE);} if(timers_enabled==TRUE){timer_start(T_FFT);} fft_gpu(-1, u1_device, u1_device); if(timers_enabled==TRUE){timer_stop(T_FFT);} if(timers_enabled==TRUE){timer_start(T_CHECKSUM);} checksum_gpu(iter, u1_device); if(timers_enabled==TRUE){timer_stop(T_CHECKSUM);} } cudaMemcpy(sums, sums_device, size_sums_device, cudaMemcpyDeviceToHost); for(iter=1; iter<=niter; iter++){ printf("T = %5d Checksum = %22.12e %22.12e\n", iter, sums[iter].real, sums[iter].imag); } verify(NX, NY, NZ, niter, &verified, &class_npb); timer_stop(T_TOTAL); total_time = timer_read(T_TOTAL); if(total_time != 0.0){ mflops = 1.0e-6 * ((double)(NTOTAL)) * (14.8157 + 7.19641 * log((double)(NTOTAL)) + (5.23518 + 7.21113 * log((double)(NTOTAL)))*niter) / total_time; }else{ mflops = 0.0; } c_print_results((char*)"FT", class_npb, NX, NY, NZ, niter, total_time, mflops, (char*)" floating point", verified, (char*)NPBVERSION, (char*)COMPILETIME, (char*)CS1, (char*)CS2, (char*)CS3, (char*)CS4, (char*)CS5, (char*)CS6, (char*)CS7); if(timers_enabled==TRUE){print_timers();} release_gpu(); return 0; } static void cffts1_gpu(const int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]){ if(timers_enabled){timer_start(T_FFTX);} int blocks_per_grid_kernel_1=ceil(double(NX*NY*NZ)/double(THREADS_PER_BLOCK_AT_FFT1)); int blocks_per_grid_kernel_2=ceil(double(NY*NZ)/double(THREADS_PER_BLOCK_AT_FFT1)); int blocks_per_grid_kernel_3=ceil(double(NX*NY*NZ)/double(THREADS_PER_BLOCK_AT_FFT1)); cffts1_gpu_kernel_1<<<blocks_per_grid_kernel_1, THREADS_PER_BLOCK_AT_FFT1>>>(x_in, y0); cudaDeviceSynchronize(); cffts1_gpu_kernel_2<<<blocks_per_grid_kernel_2, THREADS_PER_BLOCK_AT_FFT1>>>(is, y0, y1, u); cudaDeviceSynchronize(); cffts1_gpu_kernel_3<<<blocks_per_grid_kernel_3, THREADS_PER_BLOCK_AT_FFT1>>>(x_out, y0); cudaDeviceSynchronize(); if(timers_enabled){timer_stop(T_FFTX);} } /* * ---------------------------------------------------------------------- * y0[z][x][y] = x_in[z][y][x] * * y0[y + x*NY + z*NX*NY] = x_in[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts1_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } int x = x_y_z % NX; int y = (x_y_z / NX) % NY; int z = x_y_z / (NX * NY); y0[y+(x*NY)+(z*NX*NY)].real = x_in[x_y_z].real; y0[y+(x*NY)+(z*NX*NY)].imag = x_in[x_y_z].imag; } /* * ---------------------------------------------------------------------- * pattern = j + variable*NY + k*NX*NY | variable is i and transforms x axis * ---------------------------------------------------------------------- */ __global__ void cffts1_gpu_kernel_2(const int 
is, dcomplex gty1[], dcomplex gty2[], dcomplex u_device[]){ int y_z = blockIdx.x * blockDim.x + threadIdx.x; if(y_z >= (NY*NZ)){ return; } int j, k; int l, j1, i1, k1; int n1, li, lj, lk, ku, i11, i12, i21, i22; j = y_z % NY; /* j = y */ k = (y_z / NY) % NZ; /* k = z */ const int logd1 = ilog2_device(NX); double uu1_real, x11_real, x21_real; double uu1_imag, x11_imag, x21_imag; double uu2_real, x12_real, x22_real; double uu2_imag, x12_imag, x22_imag; double temp_real, temp2_real; double temp_imag, temp2_imag; for(l=1; l<=logd1; l+=2){ n1 = NX / 2; lk = 1 << (l - 1); li = 1 << (logd1 - l); lj = 2 * lk; ku = li; for(i1=0; i1<=li-1; i1++){ for(k1=0; k1<=lk-1; k1++){ i11 = i1 * lk; i12 = i11 + n1; i21 = i1 * lj; i22 = i21 + lk; uu1_real = u_device[ku+i1].real; uu1_imag = is*u_device[ku+i1].imag; /* gty1[k][i11+k1][j] */ x11_real = gty1[j + (i11+k1)*NY + k*NX*NY].real; x11_imag = gty1[j + (i11+k1)*NY + k*NX*NY].imag; /* gty1[k][i12+k1][j] */ x21_real = gty1[j + (i12+k1)*NY + k*NX*NY].real; x21_imag = gty1[j + (i12+k1)*NY + k*NX*NY].imag; /* gty2[k][i21+k1][j] */ gty2[j + (i21+k1)*NY + k*NX*NY].real = x11_real + x21_real; gty2[j + (i21+k1)*NY + k*NX*NY].imag = x11_imag + x21_imag; temp_real = x11_real - x21_real; temp_imag = x11_imag - x21_imag; /* gty2[k][i22+k1][j] */ gty2[j + (i22+k1)*NY + k*NX*NY].real = (uu1_real)*(temp_real) - (uu1_imag)*(temp_imag); gty2[j + (i22+k1)*NY + k*NX*NY].imag = (uu1_real)*(temp_imag) + (uu1_imag)*(temp_real); } } if(l==logd1){ for(j1=0; j1<NX; j1++){ /* gty1[k][j1][j] */ gty1[j + j1*NY + k*NX*NY].real = gty2[j + j1*NY + k*NX*NY].real; gty1[j + j1*NY + k*NX*NY].imag = gty2[j + j1*NY + k*NX*NY].imag; } }else{ n1 = NX / 2; lk = 1 << (l+1 - 1); li = 1 << (logd1 - (l+1)); lj = 2 * lk; ku = li; for(i1=0; i1<=li-1; i1++){ for(k1=0; k1<=lk-1; k1++){ i11 = i1 * lk; i12 = i11 + n1; i21 = i1 * lj; i22 = i21 + lk; uu2_real = u_device[ku+i1].real; uu2_imag = is*u_device[ku+i1].imag; /* gty2[k][i11+k1][j] */ x12_real = gty2[j + (i11+k1)*NY + k*NX*NY].real; x12_imag = gty2[j + (i11+k1)*NY + k*NX*NY].imag; /* gty2[k][i12+k1][j] */ x22_real = gty2[j + (i12+k1)*NY + k*NX*NY].real; x22_imag = gty2[j + (i12+k1)*NY + k*NX*NY].imag; /* gty2[k][i21+k1][j] */ gty1[j + (i21+k1)*NY + k*NX*NY].real = x12_real + x22_real; gty1[j + (i21+k1)*NY + k*NX*NY].imag = x12_imag + x22_imag; temp2_real = x12_real - x22_real; temp2_imag = x12_imag - x22_imag; /* gty1[k][i22+k1][j] */ gty1[j + (i22+k1)*NY + k*NX*NY].real = (uu2_real)*(temp2_real) - (uu2_imag)*(temp2_imag); gty1[j + (i22+k1)*NY + k*NX*NY].imag = (uu2_real)*(temp2_imag) + (uu2_imag)*(temp2_real); } } } } } /* * ---------------------------------------------------------------------- * x_out[z][y][x] = y0[z][x][y] * * x_out[x + y*NX + z*NX*NY] = y0[y + x*NY + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts1_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } int x = x_y_z % NX; int y = (x_y_z / NX) % NY; int z = x_y_z / (NX * NY); x_out[x_y_z].real = y0[y+(x*NY)+(z*NX*NY)].real; x_out[x_y_z].imag = y0[y+(x*NY)+(z*NX*NY)].imag; } static void cffts2_gpu(int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]){ if(timers_enabled){timer_start(T_FFTY);} int blocks_per_grid_kernel_1=ceil(double(NX*NY*NZ)/double(THREADS_PER_BLOCK_AT_FFT2)); int blocks_per_grid_kernel_2=ceil(double(NX*NZ)/double(THREADS_PER_BLOCK_AT_FFT2)); int 
blocks_per_grid_kernel_3=ceil(double(NX*NY*NZ)/double(THREADS_PER_BLOCK_AT_FFT2)); cffts2_gpu_kernel_1<<<blocks_per_grid_kernel_1, THREADS_PER_BLOCK_AT_FFT2>>>(x_in, y0); cudaDeviceSynchronize(); cffts2_gpu_kernel_2<<<blocks_per_grid_kernel_2, THREADS_PER_BLOCK_AT_FFT2>>>(is, y0, y1, u); cudaDeviceSynchronize(); cffts2_gpu_kernel_3<<<blocks_per_grid_kernel_3, THREADS_PER_BLOCK_AT_FFT2>>>(x_out, y0); cudaDeviceSynchronize(); if(timers_enabled){timer_stop(T_FFTY);} } /* * ---------------------------------------------------------------------- * y0[z][y][x] = x_in[z][y][x] * * y0[x + y*NX + z*NX*NY] = x_in[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts2_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } y0[x_y_z].real = x_in[x_y_z].real; y0[x_y_z].imag = x_in[x_y_z].imag; } /* * ---------------------------------------------------------------------- * pattern = i + variable*NX + k*NX*NY | variable is j and transforms y axis * ---------------------------------------------------------------------- */ __global__ void cffts2_gpu_kernel_2(const int is, dcomplex gty1[], dcomplex gty2[], dcomplex u_device[]){ int x_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_z >= (NX*NZ)){ return; } int i, k; int l, j1, i1, k1; int n1, li, lj, lk, ku, i11, i12, i21, i22; i = x_z % NX; /* i = x */ k = (x_z / NX) % NZ; /* k = z */ const int logd2 = ilog2_device(NY); double uu1_real, x11_real, x21_real; double uu1_imag, x11_imag, x21_imag; double uu2_real, x12_real, x22_real; double uu2_imag, x12_imag, x22_imag; double temp_real, temp2_real; double temp_imag, temp2_imag; for(l=1; l<=logd2; l+=2){ n1 = NY / 2; lk = 1 << (l - 1); li = 1 << (logd2 - l); lj = 2 * lk; ku = li; for(i1=0; i1<=li-1; i1++){ for(k1=0; k1<=lk-1; k1++){ i11 = i1 * lk; i12 = i11 + n1; i21 = i1 * lj; i22 = i21 + lk; uu1_real = u_device[ku+i1].real; uu1_imag = is*u_device[ku+i1].imag; /* gty1[k][i11+k1][i] */ x11_real = gty1[i + (i11+k1)*NX + k*NX*NY].real; x11_imag = gty1[i + (i11+k1)*NX + k*NX*NY].imag; /* gty1[k][i12+k1][i] */ x21_real = gty1[i + (i12+k1)*NX + k*NX*NY].real; x21_imag = gty1[i + (i12+k1)*NX + k*NX*NY].imag; /* gty2[k][i21+k1][i] */ gty2[i + (i21+k1)*NX + k*NX*NY].real = x11_real + x21_real; gty2[i + (i21+k1)*NX + k*NX*NY].imag = x11_imag + x21_imag; temp_real = x11_real - x21_real; temp_imag = x11_imag - x21_imag; /* gty2[k][i22+k1][i] */ gty2[i + (i22+k1)*NX + k*NX*NY].real = (uu1_real)*(temp_real) - (uu1_imag)*(temp_imag); gty2[i + (i22+k1)*NX + k*NX*NY].imag = (uu1_real)*(temp_imag) + (uu1_imag)*(temp_real); } } if(l==logd2){ for(j1=0; j1<NY; j1++){ /* gty1[k][j1][i] */ gty1[i + j1*NX + k*NX*NY].real = gty2[i + j1*NX + k*NX*NY].real; gty1[i + j1*NX + k*NX*NY].imag = gty2[i + j1*NX + k*NX*NY].imag; } } else{ n1 = NY / 2; lk = 1 << (l+1 - 1); li = 1 << (logd2 - (l+1)); lj = 2 * lk; ku = li; for(i1=0; i1<=li-1; i1++){ for(k1=0; k1<=lk-1; k1++){ i11 = i1 * lk; i12 = i11 + n1; i21 = i1 * lj; i22 = i21 + lk; uu2_real = u_device[ku+i1].real; uu2_imag = is*u_device[ku+i1].imag; /* gty2[k][i11+k1][i] */ x12_real = gty2[i + (i11+k1)*NX + k*NX*NY].real; x12_imag = gty2[i + (i11+k1)*NX + k*NX*NY].imag; /* gty2[k][i12+k1][i] */ x22_real = gty2[i + (i12+k1)*NX + k*NX*NY].real; x22_imag = gty2[i + (i12+k1)*NX + k*NX*NY].imag; /* gty1[k][i21+k1][i] */ gty1[i + (i21+k1)*NX + k*NX*NY].real = x12_real + x22_real; gty1[i + (i21+k1)*NX + k*NX*NY].imag = x12_imag + x22_imag; temp2_real = x12_real - 
x22_real; temp2_imag = x12_imag - x22_imag; /* gty1[k][i22+k1][i] */ gty1[i + (i22+k1)*NX + k*NX*NY].real = (uu2_real)*(temp2_real) - (uu2_imag)*(temp2_imag); gty1[i + (i22+k1)*NX + k*NX*NY].imag = (uu2_real)*(temp2_imag) + (uu2_imag)*(temp2_real); } } } } } /* * ---------------------------------------------------------------------- * x_out[z][y][x] = y0[z][y][x] * * x_out[x + y*NX + z*NX*NY] = y0[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts2_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } x_out[x_y_z].real = y0[x_y_z].real; x_out[x_y_z].imag = y0[x_y_z].imag; } static void cffts3_gpu(int is, dcomplex u[], dcomplex x_in[], dcomplex x_out[], dcomplex y0[], dcomplex y1[]){ if(timers_enabled){timer_start(T_FFTZ);} int blocks_per_grid_kernel_1=ceil(double(NX*NY*NZ)/double(THREADS_PER_BLOCK_AT_FFT3)); int blocks_per_grid_kernel_2=ceil(double(NX*NY)/double(THREADS_PER_BLOCK_AT_FFT3)); int blocks_per_grid_kernel_3=ceil(double(NX*NY*NZ)/double(THREADS_PER_BLOCK_AT_FFT3)); cffts3_gpu_kernel_1<<<blocks_per_grid_kernel_1, THREADS_PER_BLOCK_AT_FFT3>>>(x_in, y0); cudaDeviceSynchronize(); cffts3_gpu_kernel_2<<<blocks_per_grid_kernel_2, THREADS_PER_BLOCK_AT_FFT3>>>(is, y0, y1, u); cudaDeviceSynchronize(); cffts3_gpu_kernel_3<<<blocks_per_grid_kernel_3, THREADS_PER_BLOCK_AT_FFT3>>>(x_out, y0); cudaDeviceSynchronize(); if(timers_enabled){timer_stop(T_FFTZ);} } /* * ---------------------------------------------------------------------- * pattern = i + j*NX + variable*NX*NY | variable is z and transforms z axis * * index_arg = i + j*NX * * size_arg = NX*NY * ---------------------------------------------------------------------- */ __device__ void cffts3_gpu_cfftz_device(const int is, int m, int n, dcomplex x[], dcomplex y[], dcomplex u_device[], int index_arg, int size_arg){ int j,l; /* * --------------------------------------------------------------------- * perform one variant of the Stockham FFT. * --------------------------------------------------------------------- */ for(l=1; l<=m; l+=2){ cffts3_gpu_fftz2_device(is, l, m, n, u_device, x, y, index_arg, size_arg); if(l==m){break;} cffts3_gpu_fftz2_device(is, l + 1, m, n, u_device, y, x, index_arg, size_arg); } /* * --------------------------------------------------------------------- * copy Y to X. * --------------------------------------------------------------------- */ if(m%2==1){ for(j=0; j<n; j++){ x[j*size_arg+index_arg].real = y[j*size_arg+index_arg].real; x[j*size_arg+index_arg].imag = y[j*size_arg+index_arg].imag; } } } /* * ---------------------------------------------------------------------- * pattern = i + j*NX + variable*NX*NY | variable is z and transforms z axis * * index_arg = i + j*NX * * size_arg = NX*NY * ---------------------------------------------------------------------- */ __device__ void cffts3_gpu_fftz2_device(const int is, int l, int m, int n, dcomplex u[], dcomplex x[], dcomplex y[], int index_arg, int size_arg){ int k,n1,li,lj,lk,ku,i,i11,i12,i21,i22; double x11real, x11imag; double x21real, x21imag; dcomplex u1; /* * --------------------------------------------------------------------- * set initial parameters. 
* --------------------------------------------------------------------- */ n1 = n / 2; lk = 1 << (l - 1); li = 1 << (m - l); lj = 2 * lk; ku = li; for(i=0; i<li; i++){ i11 = i * lk; i12 = i11 + n1; i21 = i * lj; i22 = i21 + lk; if(is>=1){ u1.real = u[ku+i].real; u1.imag = u[ku+i].imag; }else{ u1.real = u[ku+i].real; u1.imag = -u[ku+i].imag; } for(k=0; k<lk; k++){ x11real = x[(i11+k)*size_arg+index_arg].real; x11imag = x[(i11+k)*size_arg+index_arg].imag; x21real = x[(i12+k)*size_arg+index_arg].real; x21imag = x[(i12+k)*size_arg+index_arg].imag; y[(i21+k)*size_arg+index_arg].real = x11real + x21real; y[(i21+k)*size_arg+index_arg].imag = x11imag + x21imag; y[(i22+k)*size_arg+index_arg].real = u1.real * (x11real - x21real) - u1.imag * (x11imag - x21imag); y[(i22+k)*size_arg+index_arg].imag = u1.real * (x11imag - x21imag) + u1.imag * (x11real - x21real); } } } /* * ---------------------------------------------------------------------- * y0[z][y][x] = x_in[z][y][x] * * y0[x + y*NX + z*NX*NY] = x_in[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts3_gpu_kernel_1(dcomplex x_in[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } y0[x_y_z].real = x_in[x_y_z].real; y0[x_y_z].imag = x_in[x_y_z].imag; } /* * ---------------------------------------------------------------------- * pattern = i + j*NX + variable*NX*NY | variable is z and transforms z axis * ---------------------------------------------------------------------- */ __global__ void cffts3_gpu_kernel_2(const int is, dcomplex gty1[], dcomplex gty2[], dcomplex u_device[]){ int x_y = blockIdx.x * blockDim.x + threadIdx.x; if(x_y >= (NX*NY)){ return; } cffts3_gpu_cfftz_device(is, ilog2_device(NZ), NZ, gty1 , gty2, u_device, x_y /* index_arg */, NX*NY /* size_arg */); } /* * ---------------------------------------------------------------------- * x_out[z][y][x] = y0[z][y][x] * * x_out[x + y*NX + z*NX*NY] = y0[x + y*NX + z*NX*NY] * ---------------------------------------------------------------------- */ __global__ void cffts3_gpu_kernel_3(dcomplex x_out[], dcomplex y0[]){ int x_y_z = blockIdx.x * blockDim.x + threadIdx.x; if(x_y_z >= (NX*NY*NZ)){ return; } x_out[x_y_z].real = y0[x_y_z].real; x_out[x_y_z].imag = y0[x_y_z].imag; } static void checksum_gpu(int iteration, dcomplex u1[]){ int blocks_per_grid=ceil(double(CHECKSUM_TASKS)/double(THREADS_PER_BLOCK_AT_CHECKSUM)); checksum_gpu_kernel<<<blocks_per_grid, THREADS_PER_BLOCK_AT_CHECKSUM>>>(iteration, u1, sums_device); cudaDeviceSynchronize(); } __global__ void checksum_gpu_kernel(int iteration, dcomplex u1[], dcomplex sums[]){ __shared__ dcomplex share_sums[THREADS_PER_BLOCK_AT_CHECKSUM]; int j = (blockIdx.x * blockDim.x + threadIdx.x) + 1; int q, r, s; if(j<=CHECKSUM_TASKS){ q = j % NX; r = 3*j % NY; s = 5*j % NZ; share_sums[threadIdx.x] = u1[ q + r*NX + s*NX*NY ]; }else{ share_sums[threadIdx.x] = dcomplex_create(0.0, 0.0); } __syncthreads(); for(int i=blockDim.x/2; i>0; i>>=1){ if(threadIdx.x<i){ share_sums[threadIdx.x] = dcomplex_add(share_sums[threadIdx.x], share_sums[threadIdx.x+i]); } __syncthreads(); } if(threadIdx.x==0){ share_sums[0].real = share_sums[0].real/(double)(NTOTAL); atomicAdd(&sums[iteration].real,share_sums[0].real); share_sums[0].imag = share_sums[0].imag/(double)(NTOTAL); atomicAdd(&sums[iteration].imag,share_sums[0].imag); } } static void compute_indexmap_gpu(double twiddle[]){ int 
blocks_per_grid=ceil(double(NTOTAL)/double(THREADS_PER_BLOCK_AT_COMPUTE_INDEXMAP)); compute_indexmap_gpu_kernel<<<blocks_per_grid, THREADS_PER_BLOCK_AT_COMPUTE_INDEXMAP>>>(twiddle); } __global__ void compute_indexmap_gpu_kernel(double twiddle[]){ int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id>=NTOTAL){ return; } int i = thread_id % NX; int j = (thread_id / NX) % NY; int k = thread_id / (NX * NY); int kk, kk2, jj, kj2, ii; kk = ((k+NZ/2) % NZ) - NZ/2; kk2 = kk*kk; jj = ((j+NY/2) % NY) - NY/2; kj2 = jj*jj+kk2; ii = ((i+NX/2) % NX) - NX/2; twiddle[thread_id] = exp(AP*(double)(ii*ii+kj2)); } static void compute_initial_conditions_gpu(dcomplex u0[]){ int z; double start, an, starts[NZ]; start = SEED; ipow46(A, 0, &an); randlc(&start, an); ipow46(A, 2*NX*NY, &an); starts[0] = start; for(z=1; z<NZ; z++){ randlc(&start, an); starts[z] = start; } cudaMemcpy(starts_device, starts, size_starts_device, cudaMemcpyHostToDevice); int blocks_per_grid=ceil(double(NZ)/double(THREADS_PER_BLOCK_AT_COMPUTE_INITIAL_CONDITIONS)); compute_initial_conditions_gpu_kernel<<<blocks_per_grid, THREADS_PER_BLOCK_AT_COMPUTE_INITIAL_CONDITIONS>>>(u0, starts_device); } __global__ void compute_initial_conditions_gpu_kernel(dcomplex u0[], double starts[]){ int z = blockIdx.x * blockDim.x + threadIdx.x; if(z>=NZ){return;} double x0 = starts[z]; for(int y=0; y<NY; y++){ vranlc_device(2*NX, &x0, A, (double*)&u0[ 0 + y*NX + z*NX*NY ]); } } static void evolve_gpu(dcomplex u0[], dcomplex u1[], double twiddle[]){ int blocks_per_grid=ceil(double(NTOTAL)/double(THREADS_PER_BLOCK_AT_EVOLVE)); evolve_gpu_kernel<<<blocks_per_grid, THREADS_PER_BLOCK_AT_EVOLVE>>>(u0, u1, twiddle); cudaDeviceSynchronize(); } __global__ void evolve_gpu_kernel(dcomplex u0[], dcomplex u1[], double twiddle[]){ int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id>=(NZ*NY*NX)){ return; } u0[thread_id] = dcomplex_mul2(u0[thread_id], twiddle[thread_id]); u1[thread_id] = u0[thread_id]; } static void fft_gpu(int dir, dcomplex x1[], dcomplex x2[]){ /* * --------------------------------------------------------------------- * note: args x1, x2 must be different arrays * note: args for cfftsx are (direction, layout, xin, xout, scratch) * xin/xout may be the same and it can be somewhat faster * if they are * --------------------------------------------------------------------- */ if(dir==1){ cffts1_gpu(1, u_device, x1, x1, y0_device, y1_device); cffts2_gpu(1, u_device, x1, x1, y0_device, y1_device); cffts3_gpu(1, u_device, x1, x2, y0_device, y1_device); }else{ cffts3_gpu(-1, u_device, x1, x1, y0_device, y1_device); cffts2_gpu(-1, u_device, x1, x1, y0_device, y1_device); cffts1_gpu(-1, u_device, x1, x2, y0_device, y1_device); } } static void fft_init_gpu(int n){ int m,ku,i,j,ln; double t, ti; /* * --------------------------------------------------------------------- * initialize the U array with sines and cosines in a manner that permits * stride one access at each FFT iteration. 
* --------------------------------------------------------------------- */ m = ilog2(n); u[0] = dcomplex_create((double)m, 0.0); ku = 2; ln = 1; for(j=1; j<=m; j++){ t = PI / ln; for(i=0; i<=ln-1; i++){ ti = i * t; u[i+ku-1] = dcomplex_create(cos(ti), sin(ti)); } ku = ku + ln; ln = 2 * ln; } cudaMemcpy(u_device, u, size_u_device, cudaMemcpyHostToDevice); } static int ilog2(int n){ int nn, lg; if(n==1){ return 0; } lg = 1; nn = 2; while(nn<n){ nn = nn << 1; lg++; } return lg; } __device__ int ilog2_device(int n){ int nn, lg; if(n==1){ return 0; } lg = 1; nn = 2; while(nn<n){ nn = nn << 1; lg++; } return lg; } static void init_ui_gpu(dcomplex u0[], dcomplex u1[], double twiddle[]){ int blocks_per_grid=ceil(double(NTOTAL)/double(THREADS_PER_BLOCK_AT_INIT_UI)); init_ui_gpu_kernel<<<blocks_per_grid, THREADS_PER_BLOCK_AT_EVOLVE>>>(u0, u1, twiddle); cudaDeviceSynchronize(); } __global__ void init_ui_gpu_kernel(dcomplex u0[], dcomplex u1[], double twiddle[]){ int thread_id = blockIdx.x * blockDim.x + threadIdx.x; if(thread_id>=NTOTAL){ return; } u0[thread_id] = dcomplex_create(0.0, 0.0); u1[thread_id] = dcomplex_create(0.0, 0.0); twiddle[thread_id] = 0.0; } static void ipow46(double a, int exponent, double* result){ double q, r; int n, n2; /* * -------------------------------------------------------------------- * use * a^n = a^(n/2)*a^(n/2) if n even else * a^n = a*a^(n-1) if n odd * ------------------------------------------------------------------- */ *result = 1; if(exponent==0){return;} q = a; r = 1; n = exponent; while(n>1){ n2 = n/2; if(n2*2==n){ randlc(&q, q); n = n2; }else{ randlc(&r, q); n = n-1; } } randlc(&r, q); *result = r; } __device__ void ipow46_device(double a, int exponent, double* result){ double q, r; int n, n2; /* * -------------------------------------------------------------------- * use * a^n = a^(n/2)*a^(n/2) if n even else * a^n = a*a^(n-1) if n odd * ------------------------------------------------------------------- */ *result = 1; if(exponent==0){return;} q = a; r = 1; n = exponent; while(n>1){ n2 = n/2; if(n2*2==n){ randlc_device(&q, q); n = n2; }else{ randlc_device(&r, q); n = n-1; } } randlc_device(&r, q); *result = r; } static void print_timers(){ int i; double t, t_m; char* tstrings[T_MAX+1]; tstrings[1] = (char*)" total "; tstrings[2] = (char*)" setup "; tstrings[3] = (char*)" fft "; tstrings[4] = (char*)" evolve "; tstrings[5] = (char*)" checksum "; tstrings[6] = (char*)" fftx "; tstrings[7] = (char*)" ffty "; tstrings[8] = (char*)" fftz "; t_m = timer_read(T_TOTAL); if(t_m <= 0.0){t_m = 1.00;} for(i = 1; i <= T_MAX; i++){ t = timer_read(i); printf(" timer %2d(%16s) :%9.4f (%6.2f%%)\n", i, tstrings[i], t, t*100.0/t_m); } } __device__ double randlc_device(double* x, double a){ double t1,t2,t3,t4,a1,a2,x1,x2,z; t1 = R23 * a; a1 = (int)t1; a2 = a - T23 * a1; t1 = R23 * (*x); x1 = (int)t1; x2 = (*x) - T23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int)(R23 * t1); z = t1 - T23 * t2; t3 = T23 * z + a2 * x2; t4 = (int)(R46 * t3); (*x) = t3 - T46 * t4; return (R46 * (*x)); } static void release_gpu(){ cudaFree(sums_device); cudaFree(starts_device); cudaFree(twiddle_device); cudaFree(u_device); cudaFree(u0_device); cudaFree(u1_device); cudaFree(y0_device); cudaFree(y1_device); } static void setup(){ FILE* fp; if((fp = fopen("timer.flag", "r")) != NULL){ timers_enabled = TRUE; fclose(fp); }else{ timers_enabled = FALSE; } niter = NITER_DEFAULT; printf("\n\n NAS Parallel Benchmarks 4.1 CUDA C++ version - FT Benchmark\n\n"); printf(" Size : %4dx%4dx%4d\n", NX, NY, NZ); printf(" 
Iterations :%7d\n", niter); printf("\n"); } static void setup_gpu(){ cudaDeviceProp deviceProp; cudaSetDevice(DEFAULT_GPU); cudaGetDeviceProperties(&deviceProp, DEFAULT_GPU); THREADS_PER_BLOCK_AT_COMPUTE_INDEXMAP = deviceProp.maxThreadsPerBlock; THREADS_PER_BLOCK_AT_COMPUTE_INITIAL_CONDITIONS = 128; THREADS_PER_BLOCK_AT_INIT_UI = deviceProp.maxThreadsPerBlock; THREADS_PER_BLOCK_AT_EVOLVE = deviceProp.maxThreadsPerBlock; THREADS_PER_BLOCK_AT_FFT1 = deviceProp.maxThreadsPerBlock; THREADS_PER_BLOCK_AT_FFT2 = deviceProp.maxThreadsPerBlock; THREADS_PER_BLOCK_AT_FFT3 = deviceProp.maxThreadsPerBlock; size_sums_device=sizeof(dcomplex)*(NITER_DEFAULT+1); size_starts_device=sizeof(double)*(NZ); size_twiddle_device=sizeof(double)*(NTOTAL); size_u_device=sizeof(dcomplex)*(MAXDIM); size_u0_device=sizeof(dcomplex)*(NTOTAL); size_u1_device=sizeof(dcomplex)*(NTOTAL); size_y0_device=sizeof(dcomplex)*(NTOTAL); size_y1_device=sizeof(dcomplex)*(NTOTAL); cudaMalloc(&sums_device, size_sums_device); cudaMalloc(&starts_device, size_starts_device); cudaMalloc(&twiddle_device, size_twiddle_device); cudaMalloc(&u_device, size_u_device); cudaMalloc(&u0_device, size_u0_device); cudaMalloc(&u1_device, size_u1_device); cudaMalloc(&y0_device, size_y0_device); cudaMalloc(&y1_device, size_y1_device); omp_set_num_threads(OMP_THREADS); } static void verify(int d1, int d2, int d3, int nt, boolean* verified, char* class_npb){ int i; double err, epsilon; /* * --------------------------------------------------------------------- * reference checksums * --------------------------------------------------------------------- */ dcomplex csum_ref[25+1]; *class_npb = 'U'; epsilon = 1.0e-12; *verified = false; if(d1 == 64 && d2 == 64 && d3 == 64 && nt == 6){ /* * --------------------------------------------------------------------- * sample size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'S'; csum_ref[1] = dcomplex_create(5.546087004964E+02, 4.845363331978E+02); csum_ref[2] = dcomplex_create(5.546385409189E+02, 4.865304269511E+02); csum_ref[3] = dcomplex_create(5.546148406171E+02, 4.883910722336E+02); csum_ref[4] = dcomplex_create(5.545423607415E+02, 4.901273169046E+02); csum_ref[5] = dcomplex_create(5.544255039624E+02, 4.917475857993E+02); csum_ref[6] = dcomplex_create(5.542683411902E+02, 4.932597244941E+02); }else if(d1 == 128 && d2 == 128 && d3 == 32 && nt == 6){ /* * --------------------------------------------------------------------- * class_npb W size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'W'; csum_ref[1] = dcomplex_create(5.673612178944E+02, 5.293246849175E+02); csum_ref[2] = dcomplex_create(5.631436885271E+02, 5.282149986629E+02); csum_ref[3] = dcomplex_create(5.594024089970E+02, 5.270996558037E+02); csum_ref[4] = dcomplex_create(5.560698047020E+02, 5.260027904925E+02); csum_ref[5] = dcomplex_create(5.530898991250E+02, 5.249400845633E+02); csum_ref[6] = dcomplex_create(5.504159734538E+02, 5.239212247086E+02); }else if(d1 == 256 && d2 == 256 && d3 == 128 && nt == 6){ /* * --------------------------------------------------------------------- * class_npb A size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'A'; csum_ref[1] = dcomplex_create(5.046735008193E+02, 5.114047905510E+02); csum_ref[2] = dcomplex_create(5.059412319734E+02, 5.098809666433E+02); csum_ref[3] = dcomplex_create(5.069376896287E+02, 5.098144042213E+02); csum_ref[4] = 
dcomplex_create(5.077892868474E+02, 5.101336130759E+02); csum_ref[5] = dcomplex_create(5.085233095391E+02, 5.104914655194E+02); csum_ref[6] = dcomplex_create(5.091487099959E+02, 5.107917842803E+02); }else if(d1 == 512 && d2 == 256 && d3 == 256 && nt == 20){ /* * -------------------------------------------------------------------- * class_npb B size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'B'; csum_ref[1] = dcomplex_create(5.177643571579E+02, 5.077803458597E+02); csum_ref[2] = dcomplex_create(5.154521291263E+02, 5.088249431599E+02); csum_ref[3] = dcomplex_create(5.146409228649E+02, 5.096208912659E+02); csum_ref[4] = dcomplex_create(5.142378756213E+02, 5.101023387619E+02); csum_ref[5] = dcomplex_create(5.139626667737E+02, 5.103976610617E+02); csum_ref[6] = dcomplex_create(5.137423460082E+02, 5.105948019802E+02); csum_ref[7] = dcomplex_create(5.135547056878E+02, 5.107404165783E+02); csum_ref[8] = dcomplex_create(5.133910925466E+02, 5.108576573661E+02); csum_ref[9] = dcomplex_create(5.132470705390E+02, 5.109577278523E+02); csum_ref[10] = dcomplex_create(5.131197729984E+02, 5.110460304483E+02); csum_ref[11] = dcomplex_create(5.130070319283E+02, 5.111252433800E+02); csum_ref[12] = dcomplex_create(5.129070537032E+02, 5.111968077718E+02); csum_ref[13] = dcomplex_create(5.128182883502E+02, 5.112616233064E+02); csum_ref[14] = dcomplex_create(5.127393733383E+02, 5.113203605551E+02); csum_ref[15] = dcomplex_create(5.126691062020E+02, 5.113735928093E+02); csum_ref[16] = dcomplex_create(5.126064276004E+02, 5.114218460548E+02); csum_ref[17] = dcomplex_create(5.125504076570E+02, 5.114656139760E+02); csum_ref[18] = dcomplex_create(5.125002331720E+02, 5.115053595966E+02); csum_ref[19] = dcomplex_create(5.124551951846E+02, 5.115415130407E+02); csum_ref[20] = dcomplex_create(5.124146770029E+02, 5.115744692211E+02); }else if(d1 == 512 && d2 == 512 && d3 == 512 && nt == 20){ /* * --------------------------------------------------------------------- * class_npb C size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'C'; csum_ref[1] = dcomplex_create(5.195078707457E+02, 5.149019699238E+02); csum_ref[2] = dcomplex_create(5.155422171134E+02, 5.127578201997E+02); csum_ref[3] = dcomplex_create(5.144678022222E+02, 5.122251847514E+02); csum_ref[4] = dcomplex_create(5.140150594328E+02, 5.121090289018E+02); csum_ref[5] = dcomplex_create(5.137550426810E+02, 5.121143685824E+02); csum_ref[6] = dcomplex_create(5.135811056728E+02, 5.121496764568E+02); csum_ref[7] = dcomplex_create(5.134569343165E+02, 5.121870921893E+02); csum_ref[8] = dcomplex_create(5.133651975661E+02, 5.122193250322E+02); csum_ref[9] = dcomplex_create(5.132955192805E+02, 5.122454735794E+02); csum_ref[10] = dcomplex_create(5.132410471738E+02, 5.122663649603E+02); csum_ref[11] = dcomplex_create(5.131971141679E+02, 5.122830879827E+02); csum_ref[12] = dcomplex_create(5.131605205716E+02, 5.122965869718E+02); csum_ref[13] = dcomplex_create(5.131290734194E+02, 5.123075927445E+02); csum_ref[14] = dcomplex_create(5.131012720314E+02, 5.123166486553E+02); csum_ref[15] = dcomplex_create(5.130760908195E+02, 5.123241541685E+02); csum_ref[16] = dcomplex_create(5.130528295923E+02, 5.123304037599E+02); csum_ref[17] = dcomplex_create(5.130310107773E+02, 5.123356167976E+02); csum_ref[18] = dcomplex_create(5.130103090133E+02, 5.123399592211E+02); csum_ref[19] = dcomplex_create(5.129905029333E+02, 5.123435588985E+02); csum_ref[20] = 
dcomplex_create(5.129714421109E+02, 5.123465164008E+02); }else if(d1 == 2048 && d2 == 1024 && d3 == 1024 && nt == 25){ /* * --------------------------------------------------------------------- * class_npb D size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'D'; csum_ref[1] = dcomplex_create(5.122230065252E+02, 5.118534037109E+02); csum_ref[2] = dcomplex_create(5.120463975765E+02, 5.117061181082E+02); csum_ref[3] = dcomplex_create(5.119865766760E+02, 5.117096364601E+02); csum_ref[4] = dcomplex_create(5.119518799488E+02, 5.117373863950E+02); csum_ref[5] = dcomplex_create(5.119269088223E+02, 5.117680347632E+02); csum_ref[6] = dcomplex_create(5.119082416858E+02, 5.117967875532E+02); csum_ref[7] = dcomplex_create(5.118943814638E+02, 5.118225281841E+02); csum_ref[8] = dcomplex_create(5.118842385057E+02, 5.118451629348E+02); csum_ref[9] = dcomplex_create(5.118769435632E+02, 5.118649119387E+02); csum_ref[10] = dcomplex_create(5.118718203448E+02, 5.118820803844E+02); csum_ref[11] = dcomplex_create(5.118683569061E+02, 5.118969781011E+02); csum_ref[12] = dcomplex_create(5.118661708593E+02, 5.119098918835E+02); csum_ref[13] = dcomplex_create(5.118649768950E+02, 5.119210777066E+02); csum_ref[14] = dcomplex_create(5.118645605626E+02, 5.119307604484E+02); csum_ref[15] = dcomplex_create(5.118647586618E+02, 5.119391362671E+02); csum_ref[16] = dcomplex_create(5.118654451572E+02, 5.119463757241E+02); csum_ref[17] = dcomplex_create(5.118665212451E+02, 5.119526269238E+02); csum_ref[18] = dcomplex_create(5.118679083821E+02, 5.119580184108E+02); csum_ref[19] = dcomplex_create(5.118695433664E+02, 5.119626617538E+02); csum_ref[20] = dcomplex_create(5.118713748264E+02, 5.119666538138E+02); csum_ref[21] = dcomplex_create(5.118733606701E+02, 5.119700787219E+02); csum_ref[22] = dcomplex_create(5.118754661974E+02, 5.119730095953E+02); csum_ref[23] = dcomplex_create(5.118776626738E+02, 5.119755100241E+02); csum_ref[24] = dcomplex_create(5.118799262314E+02, 5.119776353561E+02); csum_ref[25] = dcomplex_create(5.118822370068E+02, 5.119794338060E+02); }else if(d1 == 4096 && d2 == 2048 && d3 == 2048 && nt == 25){ /* * --------------------------------------------------------------------- * class_npb E size reference checksums * --------------------------------------------------------------------- */ *class_npb = 'E'; csum_ref[1] = dcomplex_create(5.121601045346E+02, 5.117395998266E+02); csum_ref[2] = dcomplex_create(5.120905403678E+02, 5.118614716182E+02); csum_ref[3] = dcomplex_create(5.120623229306E+02, 5.119074203747E+02); csum_ref[4] = dcomplex_create(5.120438418997E+02, 5.119345900733E+02); csum_ref[5] = dcomplex_create(5.120311521872E+02, 5.119551325550E+02); csum_ref[6] = dcomplex_create(5.120226088809E+02, 5.119720179919E+02); csum_ref[7] = dcomplex_create(5.120169296534E+02, 5.119861371665E+02); csum_ref[8] = dcomplex_create(5.120131225172E+02, 5.119979364402E+02); csum_ref[9] = dcomplex_create(5.120104767108E+02, 5.120077674092E+02); csum_ref[10] = dcomplex_create(5.120085127969E+02, 5.120159443121E+02); csum_ref[11] = dcomplex_create(5.120069224127E+02, 5.120227453670E+02); csum_ref[12] = dcomplex_create(5.120055158164E+02, 5.120284096041E+02); csum_ref[13] = dcomplex_create(5.120041820159E+02, 5.120331373793E+02); csum_ref[14] = dcomplex_create(5.120028605402E+02, 5.120370938679E+02); csum_ref[15] = dcomplex_create(5.120015223011E+02, 5.120404138831E+02); csum_ref[16] = dcomplex_create(5.120001570022E+02, 5.120432068837E+02); csum_ref[17] = 
dcomplex_create(5.119987650555E+02, 5.120455615860E+02); csum_ref[18] = dcomplex_create(5.119973525091E+02, 5.120475499442E+02); csum_ref[19] = dcomplex_create(5.119959279472E+02, 5.120492304629E+02); csum_ref[20] = dcomplex_create(5.119945006558E+02, 5.120506508902E+02); csum_ref[21] = dcomplex_create(5.119930795911E+02, 5.120518503782E+02); csum_ref[22] = dcomplex_create(5.119916728462E+02, 5.120528612016E+02); csum_ref[23] = dcomplex_create(5.119902874185E+02, 5.120537101195E+02); csum_ref[24] = dcomplex_create(5.119889291565E+02, 5.120544194514E+02); csum_ref[25] = dcomplex_create(5.119876028049E+02, 5.120550079284E+02); } if(*class_npb != 'U'){ *verified = TRUE; for(i = 1; i <= nt; i++){ err = dcomplex_abs(dcomplex_div(dcomplex_sub(sums[i], csum_ref[i]), csum_ref[i])); if(!(err <= epsilon)){ *verified = FALSE; break; } } } if(*class_npb != 'U'){ if(*verified){ printf(" Result verification successful\n"); }else{ printf(" Result verification failed\n"); } } printf(" class_npb = %c\n", *class_npb); } __device__ void vranlc_device(int n, double* x_seed, double a, double y[]){ int i; double x,t1,t2,t3,t4,a1,a2,x1,x2,z; t1 = R23 * a; a1 = (int)t1; a2 = a - T23 * a1; x = *x_seed; for(i=0; i<n; i++){ t1 = R23 * x; x1 = (int)t1; x2 = x - T23 * x1; t1 = a1 * x2 + a2 * x1; t2 = (int)(R23 * t1); z = t1 - T23 * t2; t3 = T23 * z + a2 * x2; t4 = (int)(R46 * t3); x = t3 - T46 * t4; y[i] = R46 * x; } *x_seed = x; }
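One portability caveat in checksum_gpu_kernel above: atomicAdd on double operands is only provided natively on GPUs of compute capability 6.0 or higher. On older devices the two atomicAdd calls would have to be redirected to a compare-and-swap fallback; a minimal sketch of that well-known pattern follows (the helper name atomicAddDouble is mine, not part of this benchmark).

// CAS-based double-precision atomicAdd for devices below sm_60.
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600
__device__ double atomicAddDouble(double* address, double val){
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do{
        assumed = old;
        // reinterpret the bits, add, and try to swap the new value in atomically
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    }while(assumed != old); // retry if another thread updated the value meanwhile
    return __longlong_as_double(old);
}
#endif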
2c5aea07ea2b9306ec5be44b19a20df7ec74f41b.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

// includes HIP runtime
#include "hip/hip_runtime.h"
#include <hip/hip_runtime.h>
// #include "device_launch_parameters.h"  // CUDA-only helper header, not needed under HIP

// nvcc does not seem to like variadic macros, so we have to define
// one for each kernel parameter list:
#ifdef __HIPCC__
#define KERNEL_ARGS2(grid, block) <<< grid, block >>>
#define KERNEL_ARGS3(grid, block, sh_mem) <<< grid, block, sh_mem >>>
#define KERNEL_ARGS4(grid, block, sh_mem, stream) <<< grid, block, sh_mem, stream >>>
#else
#define KERNEL_ARGS2(grid, block)
#define KERNEL_ARGS3(grid, block, sh_mem)
#define KERNEL_ARGS4(grid, block, sh_mem, stream)
#endif

// struct used to hold the indices
struct Index { int block, thread; };

// kernel: write the indices into device memory
__global__ void prob_idx(Index id[]) {
    int b = blockIdx.x;   // block index
    int t = threadIdx.x;  // thread index
    int n = blockDim.x;   // number of threads per block
    int x = b*n + t;      // position of this thread in the array

    // each thread writes its own block and thread index.
    id[x].block = b;
    id[x].thread = t;
};

// main function
int main() {
    Index* d;
    Index h[100];

    // allocate device memory
    hipMalloc((void**)&d, 100 * sizeof(Index));

    // launch the device kernel
    int g = 3, b = 4, m = g*b;
    // prob_idx<<< g, b>>>(d);
    prob_idx KERNEL_ARGS2(dim3(g), dim3(b)) (d);

    // copy the device memory contents back to the host
    hipMemcpy(h, d, 100 * sizeof(Index), hipMemcpyDeviceToHost);

    // print the contents
    for (int i = 0; i < m; i++) {
        printf("h[%d]={block:%d, thread:%d}\n", i, h[i].block, h[i].thread);
    }

    // free device memory
    hipFree(d);
    return 0;
}
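/*
 * A minimal sketch of the same launch written directly against the HIP runtime API,
 * without the KERNEL_ARGS2 helper; the grid/block values mirror the g = 3, b = 4
 * configuration used in main() above. launch_prob_idx is an illustrative helper
 * name, not part of the original file.
 */
void launch_prob_idx(Index* d_idx) {
    dim3 grid(3), block(4);
    // hipLaunchKernelGGL(kernel, gridDim, blockDim, dynamicSharedMemBytes, stream, args...)
    hipLaunchKernelGGL(prob_idx, grid, block, 0, 0, d_idx);
    hipDeviceSynchronize();   // wait for the kernel so the host can safely read results
}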
2c5aea07ea2b9306ec5be44b19a20df7ec74f41b.cu
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

// includes CUDA
#include "cuda.h"
#include <cuda_runtime.h>
#include "device_launch_parameters.h"

// nvcc does not seem to like variadic macros, so we have to define
// one for each kernel parameter list:
#ifdef __CUDACC__
#define KERNEL_ARGS2(grid, block) <<< grid, block >>>
#define KERNEL_ARGS3(grid, block, sh_mem) <<< grid, block, sh_mem >>>
#define KERNEL_ARGS4(grid, block, sh_mem, stream) <<< grid, block, sh_mem, stream >>>
#else
#define KERNEL_ARGS2(grid, block)
#define KERNEL_ARGS3(grid, block, sh_mem)
#define KERNEL_ARGS4(grid, block, sh_mem, stream)
#endif

// struct used to hold the indices
struct Index { int block, thread; };

// kernel: write the indices into device memory
__global__ void prob_idx(Index id[]) {
    int b = blockIdx.x;   // block index
    int t = threadIdx.x;  // thread index
    int n = blockDim.x;   // number of threads per block
    int x = b*n + t;      // position of this thread in the array

    // each thread writes its own block and thread index.
    id[x].block = b;
    id[x].thread = t;
};

// main function
int main() {
    Index* d;
    Index h[100];

    // allocate device memory
    cudaMalloc((void**)&d, 100 * sizeof(Index));

    // launch the device kernel
    int g = 3, b = 4, m = g*b;
    // prob_idx<<< g, b>>>(d);
    prob_idx KERNEL_ARGS2(dim3(g), dim3(b)) (d);

    // copy the device memory contents back to the host
    cudaMemcpy(h, d, 100 * sizeof(Index), cudaMemcpyDeviceToHost);

    // print the contents
    for (int i = 0; i < m; i++) {
        printf("h[%d]={block:%d, thread:%d}\n", i, h[i].block, h[i].thread);
    }

    // free device memory
    cudaFree(d);
    return 0;
}
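/*
 * Host-only sketch reproducing the (block, thread) table the kernel should print for
 * g = 3 blocks of b = 4 threads, i.e. x = blk * b + thr for x = 0..11; handy when
 * eyeballing the output of main(). expected_indices is an illustrative helper name,
 * not part of the original file.
 */
#include <stdio.h>

void expected_indices(int g, int b) {
    for (int blk = 0; blk < g; blk++) {
        for (int thr = 0; thr < b; thr++) {
            int x = blk * b + thr;   // same mapping as x = b*n + t in prob_idx
            printf("h[%d]={block:%d, thread:%d}\n", x, blk, thr);
        }
    }
}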
5ca020c1b9a8ffb8d09ebc339fe0b44382e889cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Author: Alex Krizhevsky ([email protected]) */ #include "../include/cudaconv2.cuh" #if defined(_WIN64) || defined(_WIN32) || defined(__APPLE__) #define uint unsigned int #endif /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread. * blockIdx.y determines 4x4 image region in target image. * * threadIdx.x determines case. * threadIdx.y determines pixel. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numColors, filterPixels, numFilters) otherwise * targets: (numColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases. * * Number of filters must be divisible by 16. * Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. */ template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv> __global__ void img_acts_color(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[numColors*16][16 + 1]; __shared__ float shHidActs[16][16*imgsPerThread]; const int blockCaseIdx = blockIdx.x * 16*imgsPerThread; const int numRegionsX = DIVUP(imgSizeX, 4); const int blockRegionIdx = blockIdx.y; const int blockRegionIdxX = blockRegionIdx % numRegionsX; const int blockRegionIdxY = blockRegionIdx / numRegionsX; const int blockRegionLeft = blockRegionIdxX * 4; const int blockRegionTop = blockRegionIdxY * 4; const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4; const int pxY = blockRegionTop + pxYInRegion; const int pxX = blockRegionLeft + pxXInRegion; const int pxIdx = pxY * imgSizeX + pxX; const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX; const int numModules = numModulesY * numModulesX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeX * imgSizeY; const int tidx = threadIdx.y * 16 + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; hidActs += blockCaseIdx + loadY * numImages * numModules + loadX; filters += threadIdx.x; targets += pxIdx * numImages + blockCaseIdx + threadIdx.x; float prod[numColors][imgsPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockRegionTop - paddingStart < filterSize ? 0 : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride); const int startX = blockRegionLeft - paddingStart < filterSize ? 
0 : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride); float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x]; float* shHidActLoad = &shHidActs[loadY][loadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInModuleY = pxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInModuleX = pxX - moduleLeft; const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize; const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX; for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time // Now the threads split up into half-warps, and each half-warp decides if it's interested. const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * 16; i += 32) { if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = 0; } } } if (isPxInImg && isPxInModule) { // This half-warp is interested, so it's going to load the weights from this module to its pixel. // Not fully coalesced read :( // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much. const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f] : &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f]; #pragma unroll for (int c = 0; c < numColors; c++) { shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation if (isPxInImg && isPxInModule) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16]; } } } } __syncthreads(); } } } // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though if (isPxInImg) { if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < numColors; c++) { targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < numColors; c++) { targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i]; } } } } } } /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread) * blockIdx.x.y = 1..numImgColors/colorsPerThread * blockIdx.y determines 4x4 image region in target image. * * threadIdx.x determines case. * threadIdx.y determines pixel. 
* * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases. * * numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * numImageColors/numGroups must be divisible by colorsPerThread. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. * * To be used when there are 4-16 color channels. */ template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv> __global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*16][16 + 1]; __shared__ float shHidActs[16][16*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,16*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread; const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int numRegionsX = DIVUP(imgSizeX, 4); const int blockRegionIdx = blockIdx.y; const int blockRegionIdxX = blockRegionIdx % numRegionsX; const int blockRegionIdxY = blockRegionIdx / numRegionsX; const int blockRegionLeft = blockRegionIdxX * 4; const int blockRegionTop = blockRegionIdxY * 4; const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4; const int pxY = blockRegionTop + pxYInRegion; const int pxX = blockRegionLeft + pxXInRegion; const int pxIdx = pxY * imgSizeX + pxX; const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX; const uint numModules = numModulesY * numModulesX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * 16 + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX; filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x; targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockRegionTop - paddingStart < filterSize ? 0 : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride); const int startX = blockRegionLeft - paddingStart < filterSize ? 
0 : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x]; float* shHidActLoad = &shHidActs[loadY][loadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInModuleY = pxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInModuleX = pxX - moduleLeft; const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize; const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX; for (int f = 0; f < numFiltersPerGroup; f += 16) { // multipply with 16 filters at a time // Now the threads split up into half-warps, and each half-warp decides if it's interested. const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * 16; i += 32) { if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = 0; } } } if (isPxInImg && isPxInModule) { // This half-warp is interested, so it's going to load the weights from this module to its pixel. // Not fully coalesced read :( // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much. const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation if (isPxInImg && isPxInModule) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16]; } } } } __syncthreads(); } } } // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though if (isPxInImg) { if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i]; } } } } } } /* * Block size: B_YxB_X. * blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread) * blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread) * blockIdx.y determines image pixel in target image. 
* * threadIdx.x determines case. * threadIdx.y determines color. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false. * numFiltersPerGroup must be divisible by filterCache. * * B_X * imgsPerThread must be divisible by 32. * numFilterColors must be divisible by B_Y*colorsPerThread. * B_X*B_Y must be divisible by 32. * filterCache must be divisible by B_X*B_Y/32 * B_X*B_Y must be divisible by filterCache * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads filterCache weights at a time, so those aren't fully coalesced (depending on size of filterCache). * * To be used when there are >= 16 color channels. */ template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCache, bool scale, bool checkCaseBounds, bool conv> __global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][filterCache + 1]; __shared__ float shHidActs[filterCache][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32; const int filtersLoadY = tidx / filterCache, filtersLoadX = tidx % filterCache; const int numModules = numModulesY * numModulesX; hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX; filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 
0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; for (int f = 0; f < numFiltersPerGroup; f += filterCache) { // multiply with filterCache filters at a time const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * B_X; i += 32) { if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) { #pragma unroll for (int j = 0; j < filterCache; j += B_X*B_Y/32) { // load filterCache rows of imgsPerThread*B_X cols, 8 * 32 elements at a time. shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < filterCache; j += B_X*B_Y/32) { // load filterCache rows of imgsPerThread*B_X cols, 8 * 32 elements at a time. shHidActLoad[j * B_X * imgsPerThread + i] = 0; } } } const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f]; #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCache) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCache) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * (filterCache + 1)] = fLoad[i * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int w = 0; w < filterCache; w++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X]; } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i]; } } } } } /* * Block size: B_YxB_X. * blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread) * blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread) * blockIdx.y determines image pixel in target image. * * threadIdx.x determines case. * threadIdx.y determines color. 
* * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false. * numFiltersPerGroup must be divisible by filterCacheF. * * numFilterColors must be divisible by B_Y*colorsPerThread. * B_X*B_Y must be divisible by filterCacheF * filterCacheF must be divisible by filterCacheH * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads filterCacheF weights at a time, so those aren't fully coalesced (depending on size of filterCacheF). * * To be used when there are >= 16 color channels. */ template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv> __global__ void conv_img_acts_manycolor_kepler(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF]; __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; const int hidActLoadY = threadIdx.y, hidActLoadX = threadIdx.x; //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread); const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF; // nvcc is behaving idiotically again, these useless declarations save registers //const int outputY = threadIdx.y, outputX = threadIdx.x; //const int ty = threadIdx.y, tx = threadIdx.x; const int numModules = numModulesY * numModulesX; hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX; filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - paddingStart < filterSize ? 
0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX]; //const bool noFLoop = filterCacheF == filterCacheH; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f]; #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * filterCacheF] = fLoad[i * filterPixels * numFilters]; } } //#pragma unroll for (int fh = f; fh < f + filterCacheF; fh += filterCacheH) { //conv_img_acts_manycolor_dummy_fhLoop<B_Y, B_X, imgsPerThread, colorsPerThread, filterCacheF, filterCacheH, checkCaseBounds>(hidActs, shHidActLoad, shHidActs, shFilters, moduleIdx, numImages, hidActLoadY, hidActLoadX, blockCaseIdx, numModules, f, fh, prod); const float* hLoad = &hidActs[(moduleIdx + fh * numModules) * numImages]; #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || hidActLoadY + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread*B_X; i += B_X) { if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) { shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } else { shHidActLoad[j * B_X * imgsPerThread + i] = 0; } } } } __syncthreads(); // Do some actual computation // Using these variables causes register usage to go from 161 --> 123. // But nonetheless, the high-register version is faster. 
//const float* shF = &shFilters[threadIdx.y][fh-f]; //const float* const shF2 = &shFilters[threadIdx.y][fh]; //const float* shH = &shHidActs[0][threadIdx.x]; #pragma unroll for (int w = 0; w < filterCacheH; w++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[c * B_Y + threadIdx.y][fh-f + w] * shHidActs[w][threadIdx.x + i * B_X]; } } } __syncthreads(); } } } } if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i]; } } } } } /* * New Titan-optimized stuff. */ __device__ __forceinline__ void conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(const int my, const int mx, const int numModulesX, const int paddingStart, const int moduleStride, const int blockPixelIdxY, const int blockPixelIdxX, const int filterSize, int &moduleIdx, int &pxIdxInFilter) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; moduleIdx = my * numModulesX + mx; // out const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; // out } #define IA_PRELOAD_LOOP(w,offset) _Pragma("unroll") \ for (int i = 0; i < imgsPerThread; i++) { \ _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \ } \ } \ /* * Same loop as above but inverted. 
*/ #define IA_PRELOAD_LOOP2(w,offset) _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ _Pragma("unroll") \ for (int i = 0; i < imgsPerThread; i++) { \ prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \ } \ } \ #define IA_PRELOAD_LOOP3(i,offset) _Pragma("unroll") \ for (int w = 0; w < filterCacheH; w++) { \ _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \ } \ } \ #define IA_PRELOAD_W(z) wPreload[z] = fLoad[(z) * B_X*B_Y/filterCacheF * filterPixels * numFilters]; #define IA_PRELOAD_W_TX(z) wPreload[z] = tex1Dfetch<float>(filters, filtersLoadOffset + (z) * B_X*B_Y/filterCacheF * filterPixels * numFilters); #define IA_PRELOAD_H(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \ hPreload[y][x] = hLoad[(y) * B_Y * numModules * numImages + (x) * B_X]; \ } #define IA_PRELOAD_H_TX(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \ hPreload[y][x] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + (y) * B_Y * numModules * numImages + (x) * B_X); \ } template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv> __global__ void __launch_bounds__(256, 2) // 256 threads per block, 2 blocks per multiprocessor // These launch bounds ensure 25% occupancy (128 registers used) // as oppposed to 13% (130 registers) achieved by defaults. conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex(hipTextureObject_t hidActs, hipTextureObject_t filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF]; __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int myCaseIdx = blockCaseIdx + threadIdx.x; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X; //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread); const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF; // nvcc is behaving idiotically again, these useless declarations save registers //const int outputY = threadIdx.y, outputX = threadIdx.x; //const int ty = threadIdx.y, tx = threadIdx.x; const int numModules = numModulesY * numModulesX; const int hidActsOffset = (blockFilterIdx + threadIdx.y) 
* numImages * numModules + myCaseIdx; const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx; // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread]; //const bool noFLoop = filterCacheF == filterCacheH; /* * Initial preload */ float hPreload[filterCacheH/B_Y][imgsPerThread]; // [2][4] float wPreload[filterCacheF*colorsPerThread/B_X]; // [8] int moduleIdx, pxIdxInFilter; conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter); // const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0] // : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0]; int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + 0 : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters); #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters); } } // const float* hLoad = &hidActs[(moduleIdx + 0 * numModules) * numImages]; int hidActsLoadOffset = hidActsOffset + (moduleIdx + 0 * numModules) * numImages; #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X); } } } } for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext; const bool lastModule = my == endY - 1 && mx == endX - 1; if (!lastModule) { mxNext = mx + 1 == endX ? 
startX : mx + 1; myNext = my + (mx + 1 == endX); } conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext); for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)]; } } filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF); if (f == numFiltersPerGroup - filterCacheF) { filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters); } #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i]; } } } } __syncthreads(); hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheH) * numModules) * numImages; #pragma unroll for (int z = 0; z < 4; ++z) { IA_PRELOAD_LOOP(z,0); IA_PRELOAD_W_TX(z); } #pragma unroll for (int z = 4; z < 12; ++z) { IA_PRELOAD_LOOP(z,0); IA_PRELOAD_H_TX((z-4)/4,z%4); } #pragma unroll for (int z = 12; z < 16; ++z) { IA_PRELOAD_LOOP(z,0); } __syncthreads(); #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i]; } } } } __syncthreads(); hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages; if (f == numFiltersPerGroup - filterCacheF) { hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages; } #pragma unroll for (int z = 0; z < 4; ++z) { IA_PRELOAD_LOOP(z,filterCacheH); IA_PRELOAD_W_TX(z+4); } #pragma unroll for (int z = 4; z < 12; ++z) { IA_PRELOAD_LOOP(z,filterCacheH); IA_PRELOAD_H_TX((z-4)/4, z%4); } #pragma unroll for (int z = 12; z < 16; ++z) { IA_PRELOAD_LOOP(z,filterCacheH); } __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i]; } } } } } template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv> __global__ void //__launch_bounds__(128, 3) // 128 threads per block, 3 blocks per multiprocessor 
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16(hipTextureObject_t hidActs, hipTextureObject_t filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF]; __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int myCaseIdx = blockCaseIdx + threadIdx.x; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X; //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread); const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF; // nvcc is behaving idiotically again, these useless declarations save registers //const int outputY = threadIdx.y, outputX = threadIdx.x; //const int ty = threadIdx.y, tx = threadIdx.x; const int numModules = numModulesY * numModulesX; const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx; const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx; // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 
0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread]; //const bool noFLoop = filterCacheF == filterCacheH; /* * Initial preload */ float hPreload[filterCacheH/B_Y][imgsPerThread]; // [4][4] float wPreload[filterCacheF*colorsPerThread/B_X]; // [6] int moduleIdx, pxIdxInFilter; conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter); // const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0] // : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0]; int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters); #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters); } } // const float* hLoad = &hidActs[moduleIdx * numImages]; int hidActsLoadOffset = hidActsOffset + moduleIdx * numImages; #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X); } } } } for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext; const bool lastModule = my == endY - 1 && mx == endX - 1; if (!lastModule) { mxNext = mx + 1 == endX ? startX : mx + 1; myNext = my + (mx + 1 == endX); } conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext); for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)]; } } filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF); if (f == numFiltersPerGroup - filterCacheF) { filtersLoadOffset = filtersOffset + (conv ? 
pxIdxInFilterNext * numFilters : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters); } #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i]; } } } } hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages; if (f == numFiltersPerGroup - filterCacheF) { hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages; } __syncthreads(); // It seems that there is no point explicitly interleaving loads // and computations because the scheduler does that anyway. IA_PRELOAD_LOOP2(0,0); IA_PRELOAD_LOOP2(1,0); IA_PRELOAD_LOOP2(2,0); IA_PRELOAD_LOOP2(3,0); IA_PRELOAD_LOOP2(4,0); IA_PRELOAD_LOOP2(5,0); IA_PRELOAD_LOOP2(6,0); IA_PRELOAD_LOOP2(7,0); IA_PRELOAD_LOOP2(8,0); IA_PRELOAD_LOOP2(9,0); IA_PRELOAD_LOOP2(10,0); IA_PRELOAD_LOOP2(11,0); IA_PRELOAD_LOOP2(12,0); IA_PRELOAD_LOOP2(13,0); IA_PRELOAD_LOOP2(14,0); IA_PRELOAD_LOOP2(15,0); IA_PRELOAD_W_TX(0); IA_PRELOAD_W_TX(1); IA_PRELOAD_W_TX(2); IA_PRELOAD_W_TX(3); IA_PRELOAD_W_TX(4); IA_PRELOAD_W_TX(5); IA_PRELOAD_H_TX(0,0); IA_PRELOAD_H_TX(0,1); IA_PRELOAD_H_TX(0,2); IA_PRELOAD_H_TX(0,3); IA_PRELOAD_H_TX(1,0); IA_PRELOAD_H_TX(1,1); IA_PRELOAD_H_TX(1,2); IA_PRELOAD_H_TX(1,3); IA_PRELOAD_H_TX(2,0); IA_PRELOAD_H_TX(2,1); IA_PRELOAD_H_TX(2,2); IA_PRELOAD_H_TX(2,3); IA_PRELOAD_H_TX(3,0); IA_PRELOAD_H_TX(3,1); IA_PRELOAD_H_TX(3,2); IA_PRELOAD_H_TX(3,3); __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i]; } } } } } /* * hidActs: (numFilters, numModules, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * targets: (overSample, numImgColors, imgPixels, numImages) * * Note: all of these convolution routines are optimized for the case when * the number of images (i.e. the minibatch size) is a multiple of 128. * Other batch sizes will work, but but I made no attempt whatsoever * to make them work fast. */ void _imgActs(THCState* state, THCudaTensor* hidActs, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput, bool conv) { int numFilterColors = numImgColors / numGroups; int numImages = hidActs->size[1]; int numFilters = filters->size[1]; int numModules = hidActs->size[0] / numFilters; int filterModuleMult = conv ? 
1 : numModules; int filterPixels = filters->size[0] / (filterModuleMult * numFilterColors); int filterSize = sqrt(filterPixels); int imgPixels = imgSizeY * imgSizeX; int numModulesX = numModules / numModulesY; THAssert(numImgColors % numGroups == 0); THAssert(numFilters % (32*numGroups) == 0); // TODO: insisting on 32 filters due to bug in calling code below. fix that. THAssert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0))); THAssert(numGroups == 1 || numFilterColors % 4 == 0); THAssert(filterPixels == filterSize * filterSize); THAssert(hidActs->size[0] == numModules * numFilters); THAssert(filters->size[0] == filterModuleMult * numFilterColors * filterPixels); THAssert(numModules == numModulesY * numModulesX); THAssert(THCudaTensor_isContiguous(state, hidActs)); THAssert(THCudaTensor_isContiguous(state, filters)); // These routines don't handle the case when only part of the image is visited in the convolution THAssert(paddingStart <= 0); THAssert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX); THAssert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY); THAssert(moduleStride <= filterSize); THAssert(THCudaTensor_isContiguous(state, targets)); // no stride support here! dim3 blocks; dim3 threads; int colorsPerThread, imgsPerThread; if (numFilterColors % 8 == 0) { threads = dim3(32, numFilterColors % 64 == 0 ? 8 : 4); colorsPerThread = numFilterColors % 64 == 0 ? 8 : numFilterColors % 48 == 0 ? 12 : numFilterColors % 32 == 0 ? 8 : numFilterColors % 16 == 0 ? 4 : 2; imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; THAssert(numFilterColors % (threads.y * colorsPerThread) == 0); blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels); // NOTE: the case when channels % 32 == 0 but channels % 48 != 0 and channels % 64 != 0 has not been optimized!! } else if (numFilterColors > 3) { // NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!! imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2; threads = dim3(16, 16); colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2; blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } else { // NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!! imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 
4 : 2; threads = dim3(16, 16); blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0; if (scaleTargets == 0) { // do not scale or use targets matrix THCudaTensor_resize2d(state, targets, numImgColors*imgPixels, numImages); } else { THAssert(targets->size[0] == numImgColors * imgPixels); THAssert(targets->size[1] == numImages); } const bool scale = scaleTargets != 0; // hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, hipFuncCachePreferShared); // hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, // texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, // imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); //return; // printf("conv: %d\n", conv); // printf("scale: %d\n", scale); // printf("checkCaseBounds: %d\n", checkCaseBounds); // printf("numFilterColors: %d\n", numFilterColors); // printf("numImages: %d\n", numImages); if (conv == true) { if (scale == false) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { // TODO: this code assumes we hvae 32 filters because it uses filter cache of 32! if (numImages % 128 == 0) { hipTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs); hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texFilters)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, 
false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 128 == 0) { hipTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs); hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texFilters)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, 
numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, 
false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, false, false, true >), dim3(blocks), dim3(threads), 0, 0, 
THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 3, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 3, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, 
filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 3, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 3, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { 
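// numFilterColors == 1 (single-channel images): dispatch img_acts_color<imgsPerThread, 1>,
// picking imgsPerThread = 8, 4, or 2 according to whether numImages is divisible by 128, 64, or 32/16.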
if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 1, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 1, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 1, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 1, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, 
targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, 
false, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else if (scale == true) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 128 == 0) { hipTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs); hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texFilters)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 128 == 0) { hipTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs); hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, true >), 
dim3(blocks), dim3(threads), 0, 0, texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texFilters)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), 
THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, 
moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, false, true >), dim3(blocks), dim3(threads), 
0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 3, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 3, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 3, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 3, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, 
numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 1, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 1, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 1, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 1, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, false, true >, 
hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, false, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, true >), 
dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, true, true >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } else if (conv == false) { if (scale == false) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 128 == 0) { hipTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs); hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, 
texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texFilters)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 128 == 0) { hipTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs); hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texFilters)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 
12, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler 
< 4, 32, 4, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), 
THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, 
false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 3, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 3, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 3, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 3, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, 
targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 1, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 1, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 1, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 1, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if 
(numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, false, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } 
} else if (numFilterColors == 2) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, false, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, false, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, false, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, false, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else if (scale == true) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 128 == 0) { hipTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs); hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texFilters)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 
scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 128 == 0) { hipTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); hipTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs); hipFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); checkCudaErrors(hipDestroyTextureObject(texFilters)); checkCudaErrors(hipDestroyTextureObject(texHidActs)); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, 
numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, 
scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor 
< 8, 4, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 8, 4, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 4, 4, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
img_acts_color < 2, 2, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 3, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 3, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 3, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 3, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 2, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 2, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), 
THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { hipFuncSetCacheConfig(img_acts_color < 8, 1, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 8, 1, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { hipFuncSetCacheConfig(img_acts_color < 4, 1, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 4, 1, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, false, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, false >), dim3(blocks), dim3(threads), 0, 0, 
THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor < 2, 4, true, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 3, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 3, true, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, 
filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 2, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 2, true, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { hipFuncSetCacheConfig(img_acts_color < 2, 1, true, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color < 2, 1, true, true, false >), dim3(blocks), dim3(threads), 0, 0, THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } getLastCudaError("imgActs: kernel execution failed"); } void convImgActs(THCState* state, THCudaTensor* hidActs, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) { _imgActs(state, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true); } void convImgActsSt(THCState* state, THCudaTensor* hidActs, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput) { _imgActs(state, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true); } void localImgActs(THCState* state, THCudaTensor* hidActs, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) { _imgActs(state, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, false); } void localImgActsSt(THCState* state, THCudaTensor* hidActs, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput) { _imgActs(state, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false); }
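/*
 * Editor's note -- illustrative sketch only, not part of the original library.
 * The dispatch above hard-codes one template instantiation per combination of
 * (numFilterColors divisibility, numImages divisibility, scale, checkCaseBounds).
 * The hypothetical helper below summarizes just the many-color path
 * (numFilterColors % 8 == 0, checkCaseBounds == false): it returns the tile shape
 * <B_Y, B_X, imgsPerThread, colorsPerThread> that those branches select.  The
 * struct and function names are invented for illustration; the real code launches
 * the templated kernels directly and never builds such a descriptor.
 */
struct ImgActsManycolorTile {
    int B_Y;             // block height (threads per block in y)
    int B_X;             // block width  (threads per block in x)
    int imgsPerThread;   // images accumulated per thread
    int colorsPerThread; // image colors reconstructed per thread
};

static inline ImgActsManycolorTile chooseManycolorTile(int numFilterColors, int numImages) {
    ImgActsManycolorTile t;
    // numImages % 128 -> 4 images/thread, % 64 -> 2, otherwise (% 32 or % 16) -> 1,
    // mirroring the nested numImages checks in the branches above.
    t.imgsPerThread = (numImages % 128 == 0) ? 4 : (numImages % 64 == 0) ? 2 : 1;
    if (numFilterColors % 64 == 0)      { t.B_Y = 8; t.B_X = 32; t.colorsPerThread = 8;  }
    else if (numFilterColors % 48 == 0) { t.B_Y = 4; t.B_X = 32; t.colorsPerThread = 12; }
    else if (numFilterColors % 32 == 0) { t.B_Y = 4; t.B_X = 32; t.colorsPerThread = 8;  }
    else if (numFilterColors % 16 == 0) { t.B_Y = 4; t.B_X = 32; t.colorsPerThread = 4;  }
    else                                { t.B_Y = 4; t.B_X = 32; t.colorsPerThread = 2;  } // numFilterColors % 8 == 0
    return t;
}
/*
 * Example: numFilterColors == 64 and numImages divisible by 128 corresponds to the
 * < 8, 32, 4, 8, 32, 16, ... > instantiations above (or their texture-preload
 * variants where the 128-divisible branch uses one).  The scale template argument
 * changes the output accumulation, not the tile shape.
 */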
5ca020c1b9a8ffb8d09ebc339fe0b44382e889cb.cu
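/*
 * Editor's note (hedged, illustrative): the file below is the original CUDA source
 * from which the hipified dispatch above was generated.  hipify rewrites the
 * host-side launch syntax mechanically; a simplified before/after, using a
 * placeholder kernel name `k<...>` rather than a kernel from this file:
 *
 *   // CUDA original
 *   cudaFuncSetCacheConfig(k<...>, cudaFuncCachePreferShared);
 *   k<...><<<blocks, threads>>>(hidActs, filters, targets, ...);
 *
 *   // HIP translation (the pattern seen in the dispatch above)
 *   hipFuncSetCacheConfig(k<...>, hipFuncCachePreferShared);
 *   hipLaunchKernelGGL((k<...>), dim3(blocks), dim3(threads), 0, 0,
 *                      hidActs, filters, targets, ...);
 *
 * Texture handling is translated the same way (cudaTextureObject_t ->
 * hipTextureObject_t, cudaDestroyTextureObject -> hipDestroyTextureObject), while
 * helper calls such as checkCudaErrors and getLastCudaError are left unchanged.
 */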
/* * Author: Alex Krizhevsky ([email protected]) */ #include "../include/cudaconv2.cuh" #if defined(_WIN64) || defined(_WIN32) || defined(__APPLE__) #define uint unsigned int #endif /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread. * blockIdx.y determines 4x4 image region in target image. * * threadIdx.x determines case. * threadIdx.y determines pixel. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numColors, filterPixels, numFilters) otherwise * targets: (numColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases. * * Number of filters must be divisible by 16. * Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. */ template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv> __global__ void img_acts_color(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[numColors*16][16 + 1]; __shared__ float shHidActs[16][16*imgsPerThread]; const int blockCaseIdx = blockIdx.x * 16*imgsPerThread; const int numRegionsX = DIVUP(imgSizeX, 4); const int blockRegionIdx = blockIdx.y; const int blockRegionIdxX = blockRegionIdx % numRegionsX; const int blockRegionIdxY = blockRegionIdx / numRegionsX; const int blockRegionLeft = blockRegionIdxX * 4; const int blockRegionTop = blockRegionIdxY * 4; const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4; const int pxY = blockRegionTop + pxYInRegion; const int pxX = blockRegionLeft + pxXInRegion; const int pxIdx = pxY * imgSizeX + pxX; const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX; const int numModules = numModulesY * numModulesX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeX * imgSizeY; const int tidx = threadIdx.y * 16 + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; hidActs += blockCaseIdx + loadY * numImages * numModules + loadX; filters += threadIdx.x; targets += pxIdx * numImages + blockCaseIdx + threadIdx.x; float prod[numColors][imgsPerThread]; #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockRegionTop - paddingStart < filterSize ? 0 : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride); const int startX = blockRegionLeft - paddingStart < filterSize ? 
0 : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride); float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x]; float* shHidActLoad = &shHidActs[loadY][loadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInModuleY = pxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInModuleX = pxX - moduleLeft; const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize; const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX; for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time // Now the threads split up into half-warps, and each half-warp decides if it's interested. const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * 16; i += 32) { if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = 0; } } } if (isPxInImg && isPxInModule) { // This half-warp is interested, so it's going to load the weights from this module to its pixel. // Not fully coalesced read :( // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much. const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f] : &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f]; #pragma unroll for (int c = 0; c < numColors; c++) { shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation if (isPxInImg && isPxInModule) { #pragma unroll for (int c = 0; c < numColors; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16]; } } } } __syncthreads(); } } } // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though if (isPxInImg) { if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < numColors; c++) { targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < numColors; c++) { targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i]; } } } } } } /* * Block size: 16x16. * blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread) * blockIdx.x.y = 1..numImgColors/colorsPerThread * blockIdx.y determines 4x4 image region in target image. * * threadIdx.x determines case. * threadIdx.y determines pixel. 
* * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases. * * numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false. * 16 * imgsPerThread must be divisible by 32. * numImageColors/numGroups must be divisible by colorsPerThread. * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads 16 weights at a time, so those aren't fully coalesced. * This version conserves shared memory by loading 16 filters at a time rather than 32. * * To be used when there are 4-16 color channels. */ template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv> __global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*16][16 + 1]; __shared__ float shHidActs[16][16*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,16*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread; const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int numRegionsX = DIVUP(imgSizeX, 4); const int blockRegionIdx = blockIdx.y; const int blockRegionIdxX = blockRegionIdx % numRegionsX; const int blockRegionIdxY = blockRegionIdx / numRegionsX; const int blockRegionLeft = blockRegionIdxX * 4; const int blockRegionTop = blockRegionIdxY * 4; const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4; const int pxY = blockRegionTop + pxYInRegion; const int pxX = blockRegionLeft + pxXInRegion; const int pxIdx = pxY * imgSizeX + pxX; const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX; const uint numModules = numModulesY * numModulesX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * 16 + threadIdx.x; const int loadY = tidx / 32, loadX = tidx % 32; hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX; filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x; targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockRegionTop - paddingStart < filterSize ? 0 : 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride); const int startX = blockRegionLeft - paddingStart < filterSize ? 
0 : 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x]; float* shHidActLoad = &shHidActs[loadY][loadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInModuleY = pxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInModuleX = pxX - moduleLeft; const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize; const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX; for (int f = 0; f < numFiltersPerGroup; f += 16) { // multipply with 16 filters at a time // Now the threads split up into half-warps, and each half-warp decides if it's interested. const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * 16; i += 32) { if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time. shHidActLoad[j * 16 * imgsPerThread + i] = 0; } } } if (isPxInImg && isPxInModule) { // This half-warp is interested, so it's going to load the weights from this module to its pixel. // Not fully coalesced read :( // But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much. const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation if (isPxInImg && isPxInModule) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int w = 0; w < 16; w++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16]; } } } } __syncthreads(); } } } // Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though if (isPxInImg) { if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i]; } } } } } } /* * Block size: B_YxB_X. * blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread) * blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread) * blockIdx.y determines image pixel in target image. 
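 *
 * Shared-memory footprint for illustrative template values (e.g. B_Y = 4, B_X = 32,
 * imgsPerThread = 4, colorsPerThread = 8, filterCache = 16): shFilters is
 * (8*4) x (16+1) = 32 x 17 floats (~2.1 KB) and shHidActs is 16 x (32*4) = 16 x 128
 * floats (8 KB). The "+ 1" pads the filter row pitch, presumably to keep the
 * column-wise reads in the inner product free of shared-memory bank conflicts.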
* * threadIdx.x determines case. * threadIdx.y determines color. * * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false. * numFiltersPerGroup must be divisible by filterCache. * * B_X * imgsPerThread must be divisible by 32. * numFilterColors must be divisible by B_Y*colorsPerThread. * B_X*B_Y must be divisible by 32. * filterCache must be divisible by B_X*B_Y/32 * B_X*B_Y must be divisible by filterCache * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads filterCache weights at a time, so those aren't fully coalesced (depending on size of filterCache). * * To be used when there are >= 16 color channels. */ template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCache, bool scale, bool checkCaseBounds, bool conv> __global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][filterCache + 1]; __shared__ float shHidActs[filterCache][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32; const int filtersLoadY = tidx / filterCache, filtersLoadX = tidx % filterCache; const int numModules = numModulesY * numModulesX; hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX; filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 
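    // Note: unlike the 4x4-region kernels above, this kernel reconstructs a single pixel,
    // so endY/endX omit the "+ 3" term. With the same illustrative numbers
    // (filterSize = 5, moduleStride = 2, paddingStart = -2), a pixel at row 8 is covered
    // by modules 3..5 only: endY = 1 + (8 + 2) / 2 = 6 (exclusive).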
0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX]; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; for (int f = 0; f < numFiltersPerGroup; f += filterCache) { // multiply with filterCache filters at a time const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages]; #pragma unroll for (int i = 0; i < imgsPerThread * B_X; i += 32) { if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) { #pragma unroll for (int j = 0; j < filterCache; j += B_X*B_Y/32) { // load filterCache rows of imgsPerThread*B_X cols, 8 * 32 elements at a time. shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } } else { #pragma unroll for (int j = 0; j < filterCache; j += B_X*B_Y/32) { // load filterCache rows of imgsPerThread*B_X cols, 8 * 32 elements at a time. shHidActLoad[j * B_X * imgsPerThread + i] = 0; } } } const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f]; #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCache) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCache) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * (filterCache + 1)] = fLoad[i * filterPixels * numFilters]; } } __syncthreads(); // Do some actual computation #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int w = 0; w < filterCache; w++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X]; } } } __syncthreads(); } } } if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i]; } } } } } /* * Block size: B_YxB_X. * blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread. * In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread) * blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread) * blockIdx.y determines image pixel in target image. * * threadIdx.x determines case. * threadIdx.y determines color. 
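 *
 * Example of the divisibility requirements with template values the dispatcher below
 * actually uses, e.g. <B_Y=4, B_X=32, imgsPerThread=4, colorsPerThread=8,
 * filterCacheF=32, filterCacheH=16>: B_X*B_Y = 128 is divisible by filterCacheF = 32,
 * filterCacheF = 32 is divisible by filterCacheH = 16, and numFilterColors must be a
 * multiple of B_Y*colorsPerThread = 32 (hence that instantiation sits on the
 * numFilterColors % 32 == 0 branch).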
* * hidActs: (numFilters, numModulesY, numModulesX, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgSizeY, imgSizeX, numImages) * * Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases. * * numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false. * numFiltersPerGroup must be divisible by filterCacheF. * * numFilterColors must be divisible by B_Y*colorsPerThread. * B_X*B_Y must be divisible by filterCacheF * filterCacheF must be divisible by filterCacheH * * This version loads 32 cases at a time, so it gets full coalescing on that load. * It only loads filterCacheF weights at a time, so those aren't fully coalesced (depending on size of filterCacheF). * * To be used when there are >= 16 color channels. */ template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv> __global__ void conv_img_acts_manycolor_kepler(const float* hidActs, const float* filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF]; __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; const int hidActLoadY = threadIdx.y, hidActLoadX = threadIdx.x; //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread); const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF; // nvcc is behaving idiotically again, these useless declarations save registers //const int outputY = threadIdx.y, outputX = threadIdx.x; //const int ty = threadIdx.y, tx = threadIdx.x; const int numModules = numModulesY * numModulesX; hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX; filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - paddingStart < filterSize ? 
0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX]; //const bool noFLoop = filterCacheF == filterCacheH; for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { const int moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f] : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f]; #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * filterCacheF] = fLoad[i * filterPixels * numFilters]; } } //#pragma unroll for (int fh = f; fh < f + filterCacheF; fh += filterCacheH) { //conv_img_acts_manycolor_dummy_fhLoop<B_Y, B_X, imgsPerThread, colorsPerThread, filterCacheF, filterCacheH, checkCaseBounds>(hidActs, shHidActLoad, shHidActs, shFilters, moduleIdx, numImages, hidActLoadY, hidActLoadX, blockCaseIdx, numModules, f, fh, prod); const float* hLoad = &hidActs[(moduleIdx + fh * numModules) * numImages]; #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || hidActLoadY + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread*B_X; i += B_X) { if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) { shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i]; } else { shHidActLoad[j * B_X * imgsPerThread + i] = 0; } } } } __syncthreads(); // Do some actual computation // Using these variables causes register usage to go from 161 --> 123. // But nonetheless, the high-register version is faster. 
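                    // Per-thread work in the stage below (illustrative, for the
                    // <8, 32, 4, 8, 32, 16> instantiation): each thread accumulates a
                    // colorsPerThread x imgsPerThread = 8 x 4 register tile, issuing
                    // filterCacheH * 8 * 4 = 512 fused multiply-adds per stage while
                    // reading only 16*8 filter values and 16*4 hidAct values from
                    // shared memory.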
//const float* shF = &shFilters[threadIdx.y][fh-f]; //const float* const shF2 = &shFilters[threadIdx.y][fh]; //const float* shH = &shHidActs[0][threadIdx.x]; #pragma unroll for (int w = 0; w < filterCacheH; w++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { prod[c][i] += shFilters[c * B_Y + threadIdx.y][fh-f + w] * shHidActs[w][threadIdx.x + i * B_X]; } } } __syncthreads(); } } } } if (scale) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i]; } } } } } /* * New Titan-optimized stuff. */ __device__ __forceinline__ void conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(const int my, const int mx, const int numModulesX, const int paddingStart, const int moduleStride, const int blockPixelIdxY, const int blockPixelIdxX, const int filterSize, int &moduleIdx, int &pxIdxInFilter) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; moduleIdx = my * numModulesX + mx; // out const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; // out } #define IA_PRELOAD_LOOP(w,offset) _Pragma("unroll") \ for (int i = 0; i < imgsPerThread; i++) { \ _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \ } \ } \ /* * Same loop as above but inverted. 
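 * (IA_PRELOAD_LOOP walks images in the outer loop and colors inside; IA_PRELOAD_LOOP2
 * swaps the two. The arithmetic is identical, only the issue order changes, which
 * presumably affects register reuse and how well the compiler interleaves the FMAs
 * with the texture preloads.)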
*/ #define IA_PRELOAD_LOOP2(w,offset) _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ _Pragma("unroll") \ for (int i = 0; i < imgsPerThread; i++) { \ prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \ } \ } \ #define IA_PRELOAD_LOOP3(i,offset) _Pragma("unroll") \ for (int w = 0; w < filterCacheH; w++) { \ _Pragma("unroll") \ for (int c = 0; c < colorsPerThread; c++) { \ prod[c][i] += shFilters[c * B_Y + threadIdx.y][(w)+(offset)] * shHidActs[w][threadIdx.x * imgsPerThread + i]; \ } \ } \ #define IA_PRELOAD_W(z) wPreload[z] = fLoad[(z) * B_X*B_Y/filterCacheF * filterPixels * numFilters]; #define IA_PRELOAD_W_TX(z) wPreload[z] = tex1Dfetch<float>(filters, filtersLoadOffset + (z) * B_X*B_Y/filterCacheF * filterPixels * numFilters); #define IA_PRELOAD_H(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \ hPreload[y][x] = hLoad[(y) * B_Y * numModules * numImages + (x) * B_X]; \ } #define IA_PRELOAD_H_TX(y,x) if (!checkCaseBounds || myCaseIdx + (x) * B_X < numImages) { \ hPreload[y][x] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + (y) * B_Y * numModules * numImages + (x) * B_X); \ } template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv> __global__ void __launch_bounds__(256, 2) // 256 threads per block, 2 blocks per multiprocessor // These launch bounds ensure 25% occupancy (128 registers used) // as oppposed to 13% (130 registers) achieved by defaults. conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex(cudaTextureObject_t hidActs, cudaTextureObject_t filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF]; __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int myCaseIdx = blockCaseIdx + threadIdx.x; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X; //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread); const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF; // nvcc is behaving idiotically again, these useless declarations save registers //const int outputY = threadIdx.y, outputX = threadIdx.x; //const int ty = threadIdx.y, tx = threadIdx.x; const int numModules = numModulesY * numModulesX; const int hidActsOffset = (blockFilterIdx + 
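    // hidActsOffset / filtersOffset here are element (not byte) offsets into the
    // flattened hidActs / filters buffers; they replace the commented-out pointer
    // bumps and are later combined with per-module offsets and handed to
    // tex1Dfetch<float>() on the corresponding texture objects.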
threadIdx.y) * numImages * numModules + myCaseIdx; const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx; // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread]; //const bool noFLoop = filterCacheF == filterCacheH; /* * Initial preload */ float hPreload[filterCacheH/B_Y][imgsPerThread]; // [2][4] float wPreload[filterCacheF*colorsPerThread/B_X]; // [8] int moduleIdx, pxIdxInFilter; conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter); // const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0] // : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0]; int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + 0 : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters); #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters); } } // const float* hLoad = &hidActs[(moduleIdx + 0 * numModules) * numImages]; int hidActsLoadOffset = hidActsOffset + (moduleIdx + 0 * numModules) * numImages; #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X); } } } } for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext; const bool lastModule = my == endY - 1 && mx == endX - 1; if (!lastModule) { mxNext = mx + 1 == endX ? 
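                // Software pipelining: myNext/mxNext name the module that will be visited
                // next, so that while the current filterCacheF-filter tile is being consumed,
                // the next tile of weights (wPreload) and hidden activations (hPreload) is
                // already being fetched through the texture path; on the last f-iteration the
                // prefetch offsets are redirected to the first tile of that next module.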
startX : mx + 1; myNext = my + (mx + 1 == endX); } conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext); for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)]; } } filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF); if (f == numFiltersPerGroup - filterCacheF) { filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilterNext * numFilters : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters); } #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i]; } } } } __syncthreads(); hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheH) * numModules) * numImages; #pragma unroll for (int z = 0; z < 4; ++z) { IA_PRELOAD_LOOP(z,0); IA_PRELOAD_W_TX(z); } #pragma unroll for (int z = 4; z < 12; ++z) { IA_PRELOAD_LOOP(z,0); IA_PRELOAD_H_TX((z-4)/4,z%4); } #pragma unroll for (int z = 12; z < 16; ++z) { IA_PRELOAD_LOOP(z,0); } __syncthreads(); #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i]; } } } } __syncthreads(); hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages; if (f == numFiltersPerGroup - filterCacheF) { hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages; } #pragma unroll for (int z = 0; z < 4; ++z) { IA_PRELOAD_LOOP(z,filterCacheH); IA_PRELOAD_W_TX(z+4); } #pragma unroll for (int z = 4; z < 12; ++z) { IA_PRELOAD_LOOP(z,filterCacheH); IA_PRELOAD_H_TX((z-4)/4, z%4); } #pragma unroll for (int z = 12; z < 16; ++z) { IA_PRELOAD_LOOP(z,filterCacheH); } __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i]; } } } } } template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, int filterCacheF, int filterCacheH, bool scale, bool checkCaseBounds, bool conv> __global__ void //__launch_bounds__(128, 3) // 128 threads per block, 3 blocks per multiprocessor 
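// Naming convention, as read from the template arguments the dispatcher passes below:
// ty_4_tx_32 = a 32x4 thread block, c_12 = colorsPerThread, ff_16 = filterCacheF,
// fh_16 = filterCacheH. This variant is selected for numFilterColors % 48 == 0 with
// numImages % 128 == 0, and reads its inputs through texture objects.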
conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16(cudaTextureObject_t hidActs, cudaTextureObject_t filters, float* targets, const int numModulesY, const int numModulesX, const int numImages, const int numFilters, const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs) { __shared__ float shFilters[colorsPerThread*B_Y][filterCacheF]; __shared__ float shHidActs[filterCacheH][B_X*imgsPerThread]; const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread); const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread; const int myCaseIdx = blockCaseIdx + threadIdx.x; const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally const int numFilterColors = numImgColors / numGroups; const int blockGroupIdx = imgColorIdx / numFilterColors; const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group const int numFiltersPerGroup = numFilters / numGroups; const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup; const int blockPixelIdx = blockIdx.y; const int blockPixelIdxX = blockPixelIdx % imgSizeX; const int blockPixelIdxY = blockPixelIdx / imgSizeX; const int filterPixels = filterSize * filterSize; const int imgPixels = imgSizeY * imgSizeX; const int tidx = threadIdx.y * B_X + threadIdx.x; // const int hidActLoadY = threadIdx.y % B_Y, hidActLoadX = threadIdx.x % B_X; //const int hidActLoadY = tidx / (B_X*imgsPerThread), hidActLoadX = tidx % (B_X*imgsPerThread); const int filtersLoadY = tidx / filterCacheF, filtersLoadX = tidx % filterCacheF; // nvcc is behaving idiotically again, these useless declarations save registers //const int outputY = threadIdx.y, outputX = threadIdx.x; //const int ty = threadIdx.y, tx = threadIdx.x; const int numModules = numModulesY * numModulesX; const int hidActsOffset = (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx; const int filtersOffset = blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; // hidActs += (blockFilterIdx + threadIdx.y) * numImages * numModules + myCaseIdx; // filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX; targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + myCaseIdx; float prod[colorsPerThread][imgsPerThread]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { prod[c][i] = 0; } } const int startY = blockPixelIdxY - paddingStart < filterSize ? 0 : 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride; const int endY = min(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride); const int startX = blockPixelIdxX - paddingStart < filterSize ? 
0 : 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride; const int endX = min(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride); float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX]; float* shHidActLoad = &shHidActs[threadIdx.y][threadIdx.x * imgsPerThread]; //const bool noFLoop = filterCacheF == filterCacheH; /* * Initial preload */ float hPreload[filterCacheH/B_Y][imgsPerThread]; // [4][4] float wPreload[filterCacheF*colorsPerThread/B_X]; // [6] int moduleIdx, pxIdxInFilter; conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(startY, startX, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdx, pxIdxInFilter); // const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + 0] // : &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + 0]; int filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters); #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { wPreload[i * filterCacheF/(B_X*B_Y)] = tex1Dfetch<float>(filters, filtersLoadOffset + i * filterPixels * numFilters); } } // const float* hLoad = &hidActs[moduleIdx * numImages]; int hidActsLoadOffset = hidActsOffset + moduleIdx * numImages; #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { hPreload[j/B_Y][i] = tex1Dfetch<float>(hidActs, hidActsLoadOffset + j * numModules * numImages + i * B_X); } } } } for (int my = startY; my < endY; my++) { const int moduleTop = paddingStart + my * moduleStride; const int pxInFilterY = blockPixelIdxY - moduleTop; for (int mx = startX; mx < endX; mx++) { moduleIdx = my * numModulesX + mx; const int moduleLeft = paddingStart + mx * moduleStride; const int pxInFilterX = blockPixelIdxX - moduleLeft; pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX; int myNext = my, mxNext = mx, moduleIdxNext, pxIdxInFilterNext; const bool lastModule = my == endY - 1 && mx == endX - 1; if (!lastModule) { mxNext = mx + 1 == endX ? startX : mx + 1; myNext = my + (mx + 1 == endX); } conv_img_acts_manycolor_preload_ty_8_tx_32_c_8_ff_32_fh_16_setCoords(myNext, mxNext, numModulesX, paddingStart, moduleStride, blockPixelIdxY, blockPixelIdxX, filterSize, moduleIdxNext, pxIdxInFilterNext); for (int f = 0; f < numFiltersPerGroup; f += filterCacheF) { // multiply with filterCacheF filters at a time #pragma unroll for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/filterCacheF) { if ((colorsPerThread*B_Y) % (B_X*B_Y/filterCacheF) == 0 || i + filtersLoadY < colorsPerThread*B_Y) { shFilterLoad[i * filterCacheF] = wPreload[i * filterCacheF/(B_X*B_Y)]; } } filtersLoadOffset = filtersOffset + (conv ? pxIdxInFilter * numFilters + f + filterCacheF : moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f + filterCacheF); if (f == numFiltersPerGroup - filterCacheF) { filtersLoadOffset = filtersOffset + (conv ? 
pxIdxInFilterNext * numFilters : moduleIdxNext * numFilterColors * filterPixels * numFilters + pxIdxInFilterNext * numFilters); } #pragma unroll for (int j = 0; j < filterCacheH; j += B_Y) { if (filterCacheH % B_Y == 0 || threadIdx.y + j < filterCacheH) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { // NOTE: bank conflicts here! if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { shHidActLoad[j * B_X * imgsPerThread + i] = hPreload[j/B_Y][i]; } } } } hidActsLoadOffset = hidActsOffset + (moduleIdx + (f + filterCacheF) * numModules) * numImages; if (f == numFiltersPerGroup - filterCacheF) { hidActsLoadOffset = hidActsOffset + moduleIdxNext * numImages; } __syncthreads(); // It seems that there is no point explicitly interleaving loads // and computations because the scheduler does that anyway. IA_PRELOAD_LOOP2(0,0); IA_PRELOAD_LOOP2(1,0); IA_PRELOAD_LOOP2(2,0); IA_PRELOAD_LOOP2(3,0); IA_PRELOAD_LOOP2(4,0); IA_PRELOAD_LOOP2(5,0); IA_PRELOAD_LOOP2(6,0); IA_PRELOAD_LOOP2(7,0); IA_PRELOAD_LOOP2(8,0); IA_PRELOAD_LOOP2(9,0); IA_PRELOAD_LOOP2(10,0); IA_PRELOAD_LOOP2(11,0); IA_PRELOAD_LOOP2(12,0); IA_PRELOAD_LOOP2(13,0); IA_PRELOAD_LOOP2(14,0); IA_PRELOAD_LOOP2(15,0); IA_PRELOAD_W_TX(0); IA_PRELOAD_W_TX(1); IA_PRELOAD_W_TX(2); IA_PRELOAD_W_TX(3); IA_PRELOAD_W_TX(4); IA_PRELOAD_W_TX(5); IA_PRELOAD_H_TX(0,0); IA_PRELOAD_H_TX(0,1); IA_PRELOAD_H_TX(0,2); IA_PRELOAD_H_TX(0,3); IA_PRELOAD_H_TX(1,0); IA_PRELOAD_H_TX(1,1); IA_PRELOAD_H_TX(1,2); IA_PRELOAD_H_TX(1,3); IA_PRELOAD_H_TX(2,0); IA_PRELOAD_H_TX(2,1); IA_PRELOAD_H_TX(2,2); IA_PRELOAD_H_TX(2,3); IA_PRELOAD_H_TX(3,0); IA_PRELOAD_H_TX(3,1); IA_PRELOAD_H_TX(3,2); IA_PRELOAD_H_TX(3,3); __syncthreads(); } } } if (scale) { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i]; } } } } else { #pragma unroll for (int c = 0; c < colorsPerThread; c++) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkCaseBounds || myCaseIdx + i * B_X < numImages) { targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i]; } } } } } /* * hidActs: (numFilters, numModules, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * targets: (overSample, numImgColors, imgPixels, numImages) * * Note: all of these convolution routines are optimized for the case when * the number of images (i.e. the minibatch size) is a multiple of 128. * Other batch sizes will work, but but I made no attempt whatsoever * to make them work fast. */ void _imgActs(THCState* state, THCudaTensor* hidActs, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput, bool conv) { int numFilterColors = numImgColors / numGroups; int numImages = hidActs->size[1]; int numFilters = filters->size[1]; int numModules = hidActs->size[0] / numFilters; int filterModuleMult = conv ? 
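    // Illustrative shape check (example values only): for conv == true, numGroups == 1,
    // numImgColors = 3, a 5x5 bank of 64 filters has filters->size = {3*5*5, 64} = {75, 64},
    // so filterPixels = 25 and filterSize = 5. With imgSizeY = imgSizeX = 32,
    // paddingStart = -2 and moduleStride = 1, numModulesY = numModulesX = 32 and
    // numModules = 1024, so hidActs->size[0] must equal 64 * 1024.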
1 : numModules; int filterPixels = filters->size[0] / (filterModuleMult * numFilterColors); int filterSize = sqrt(filterPixels); int imgPixels = imgSizeY * imgSizeX; int numModulesX = numModules / numModulesY; THAssert(numImgColors % numGroups == 0); THAssert(numFilters % (32*numGroups) == 0); // TODO: insisting on 32 filters due to bug in calling code below. fix that. THAssert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0))); THAssert(numGroups == 1 || numFilterColors % 4 == 0); THAssert(filterPixels == filterSize * filterSize); THAssert(hidActs->size[0] == numModules * numFilters); THAssert(filters->size[0] == filterModuleMult * numFilterColors * filterPixels); THAssert(numModules == numModulesY * numModulesX); THAssert(THCudaTensor_isContiguous(state, hidActs)); THAssert(THCudaTensor_isContiguous(state, filters)); // These routines don't handle the case when only part of the image is visited in the convolution THAssert(paddingStart <= 0); THAssert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX); THAssert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY); THAssert(moduleStride <= filterSize); THAssert(THCudaTensor_isContiguous(state, targets)); // no stride support here! dim3 blocks; dim3 threads; int colorsPerThread, imgsPerThread; if (numFilterColors % 8 == 0) { threads = dim3(32, numFilterColors % 64 == 0 ? 8 : 4); colorsPerThread = numFilterColors % 64 == 0 ? 8 : numFilterColors % 48 == 0 ? 12 : numFilterColors % 32 == 0 ? 8 : numFilterColors % 16 == 0 ? 4 : 2; imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1; THAssert(numFilterColors % (threads.y * colorsPerThread) == 0); blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels); // NOTE: the case when channels % 32 == 0 but channels % 48 != 0 and channels % 64 != 0 has not been optimized!! } else if (numFilterColors > 3) { // NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!! imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2; threads = dim3(16, 16); colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2; blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } else { // NOTE: THIS CASE HAS NOT BEEN OPTIMIZED FOR KEPLER!! imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 
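            // Launch-configuration example for the numFilterColors % 8 == 0 branch above
            // (illustrative): numImgColors = numFilterColors = 64, numImages = 128 and
            // 32x32 images give threads = (32, 8), colorsPerThread = 8, imgsPerThread = 4,
            // hence blocks = (DIVUP(128, 128) * (64/64), 1024) = (1, 1024) and
            // checkCaseBounds = false; the dispatch below then selects the 8x32
            // texture-preload kernel.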
4 : 2; threads = dim3(16, 16); blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4)); } bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0; if (scaleTargets == 0) { // do not scale or use targets matrix THCudaTensor_resize2d(state, targets, numImgColors*imgPixels, numImages); } else { THAssert(targets->size[0] == numImgColors * imgPixels); THAssert(targets->size[1] == numImages); } const bool scale = scaleTargets != 0; // cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared); // conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true ><<<blocks, threads, 0>>>( // texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, // imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); //return; // printf("conv: %d\n", conv); // printf("scale: %d\n", scale); // printf("checkCaseBounds: %d\n", checkCaseBounds); // printf("numFilterColors: %d\n", numFilterColors); // printf("numImages: %d\n", numImages); if (conv == true) { if (scale == false) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { // TODO: this code assumes we hvae 32 filters because it uses filter cache of 32! if (numImages % 128 == 0) { cudaTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs); cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, true ><<<blocks, threads, 0>>>(texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texFilters)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, true 
><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 128 == 0) { cudaTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs); cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, true ><<<blocks, threads, 0>>>(texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texFilters)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, true ><<<blocks, threads, 
0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { 
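                        // Each launch in this dispatcher is preceded by
                        // cudaFuncSetCacheConfig(..., cudaFuncCachePreferShared), requesting
                        // the larger shared-memory carve-out, presumably because these kernels
                        // lean on their shFilters/shHidActs tiles rather than on L1 cache.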
cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 8, 4, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 4, 4, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 
scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 2, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 2, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 3, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 3, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 3, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 3, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, 
numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 2, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 2, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 1, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 1, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 1, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 1, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, 
targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 1 == 0) { 
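// numFilterColors == 4 with case-bounds checking: the numImages % 1 guard is trivially true,
// so the single img_acts_mediumcolor < 2, 4, false, true, true > instantiation below handles every minibatch size.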
cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else if (scale == true) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 128 == 0) { cudaTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs); cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, true ><<<blocks, threads, 0>>>(texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texFilters)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, true ><<<blocks, threads, 
0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 128 == 0) { cudaTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs); cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, true ><<<blocks, threads, 0>>>(texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texFilters)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), 
THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >, 
cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, true, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 8, 4, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, 
numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, true, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 4, 4, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 2, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 2, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 3, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 3, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), 
THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 3, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 3, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 2, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 2, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 1, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 8, 1, true, false, true ><<<blocks, threads, 
0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 1, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 4, 1, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, false, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, false, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, 
true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, true >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, true, true >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, true, true >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, true, true ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } else if (conv == false) { if (scale == false) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 128 == 0) { cudaTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); 
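// Texture-object path for the local (conv == false) case: filters and hidActs are bound as texture objects
// for the preloadfh kernel launched below, and both objects are destroyed immediately after the launch.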
cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs); cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, false, false, false ><<<blocks, threads, 0>>>(texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texFilters)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 128 == 0) { cudaTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs); cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, false, false, false ><<<blocks, threads, 0>>>(texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texFilters)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, 
numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared); 
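// As throughout this dispatch, the cudaFuncSetCacheConfig call above and the launch below should name the
// exact same template instantiation for the shared-memory preference to apply to the kernel actually run;
// the last three boolean template arguments appear to mirror (scale, checkCaseBounds, conv).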
conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, 
scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, false, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 8, 4, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, false, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 4, 4, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 2, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 2, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, 
cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 3, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 3, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 3, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 3, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 2, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 2, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 
0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 1, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 1, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 1, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 1, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, false, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, false, true, false 
><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, false, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, false, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, false, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, false, true, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, false, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, false, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, false, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { 
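// Few-color fallback: img_acts_color takes no numImgColors/numGroups arguments, and with bounds checking
// enabled only the < 2, 2, false, true, false > instantiation below is used, so any minibatch size is accepted.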
cudaFuncSetCacheConfig(img_acts_color < 2, 2, false, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, false, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, false, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, false, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else if (scale == true) { if (checkCaseBounds == false) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 128 == 0) { cudaTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs); cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_8_tx_32_c_8_ff_32_fh_16_tex< 8, 32, 4, 8, 32, 16, true, false, false ><<<blocks, threads, 0>>>(texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texFilters)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 2, 8, 32, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 128 == 0) { cudaTextureObject_t texFilters = THCudaTensor_getTextureObject(state, filters); 
cudaTextureObject_t texHidActs = THCudaTensor_getTextureObject(state, hidActs); cudaFuncSetCacheConfig(conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_preloadfh_ty_4_tx_32_c_12_ff_16_fh_16< 4, 32, 4, 12, 16, 16, true, false, false ><<<blocks, threads, 0>>>(texHidActs, texFilters, THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); checkCudaErrors(cudaDestroyTextureObject(texFilters)); checkCudaErrors(cudaDestroyTextureObject(texHidActs)); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 12, 16, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 8, 32, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 8, 32, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); 
conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 4, 16, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 4, 16, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 4, 2, 16, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, 
numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 2, 2, 16, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 8, 4, true, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 8, 4, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 4, 4, true, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 4, 4, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, false, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, 
numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 2, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 2, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 3, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 3, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 3, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 3, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, 
numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 2, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 2, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 128 == 0) { cudaFuncSetCacheConfig(img_acts_color < 8, 1, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 8, 1, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 64 == 0) { cudaFuncSetCacheConfig(img_acts_color < 4, 1, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 4, 1, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 32 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numImages % 16 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, false, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, false, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, 
targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else if (checkCaseBounds == true) { if (numFilterColors % 8 == 0) { if (numFilterColors % 64 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 8, 32, 1, 8, 32, 16, true, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 48 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 12, 16, 16, true, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 32 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 8, 32, 16, true, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 16 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 4, 16, 16, true, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if (numFilterColors % 8 == 0) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, false >, cudaFuncCachePreferShared); conv_img_acts_manycolor_kepler < 4, 32, 1, 2, 16, 16, true, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (numFilterColors == 4) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_mediumcolor < 2, 4, true, true, false >, cudaFuncCachePreferShared); img_acts_mediumcolor < 2, 4, true, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else if 
(numFilterColors == 2) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } else if (numFilterColors <= 3) { if (numFilterColors == 3) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 3, true, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 3, true, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 2) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 2, true, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 2, true, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else if (numFilterColors == 1) { if (numImages % 1 == 0) { cudaFuncSetCacheConfig(img_acts_color < 2, 1, true, true, false >, cudaFuncCachePreferShared); img_acts_color < 2, 1, true, true, false ><<<blocks, threads, 0>>>(THCudaTensor_data(state, hidActs), THCudaTensor_data(state, filters), THCudaTensor_data(state, targets), numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } } getLastCudaError("imgActs: kernel execution failed"); } void convImgActs(THCState* state, THCudaTensor* hidActs, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) { _imgActs(state, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true); } void convImgActsSt(THCState* state, THCudaTensor* hidActs, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput) { _imgActs(state, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true); } void localImgActs(THCState* state, THCudaTensor* hidActs, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) { _imgActs(state, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, false); } void localImgActsSt(THCState* state, THCudaTensor* hidActs, THCudaTensor* filters, THCudaTensor* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput) { _imgActs(state, hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false); }
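/* Editor's note: the dispatch ladder above chooses a compile-time template
   instantiation (images-per-thread, colors-per-thread, tile shape) from runtime
   properties such as the divisibility of numImages by 128/64/32/16 and the
   number of filter colors, and the wrappers that follow forward to _imgActs,
   differing only in whether the scaling parameters are exposed and whether the
   conv flag is set. A stripped-down sketch of that dispatch pattern, with a toy
   kernel and hypothetical names used purely for illustration (not part of this
   file): */

// Toy kernel templated on the per-thread work factor.
template <int IMGS_PER_THREAD>
__global__ void toyCopyScale(const float* in, float* out, int numImages)
{
    int base = (blockIdx.x * blockDim.x + threadIdx.x) * IMGS_PER_THREAD;
    #pragma unroll
    for (int i = 0; i < IMGS_PER_THREAD; ++i) {
        if (base + i < numImages) out[base + i] = 2.0f * in[base + i];
    }
}

// Pick the compile-time work factor from a runtime divisibility test, the same
// way the ladder above selects its <8,...>, <4,...>, <2,...> kernel variants.
void dispatchToy(const float* in, float* out, int numImages)
{
    const int threads = 128;
    if (numImages % (threads * 4) == 0) {
        toyCopyScale<4><<<numImages / (threads * 4), threads>>>(in, out, numImages);
    } else if (numImages % (threads * 2) == 0) {
        toyCopyScale<2><<<numImages / (threads * 2), threads>>>(in, out, numImages);
    } else {
        toyCopyScale<1><<<(numImages + threads - 1) / threads, threads>>>(in, out, numImages);
    }
}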
bd1d4e206b2546311f1a40d741998ee19fe36ecd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void brickSort(int* array, int arrayLen, int p)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= arrayLen - 1) return;
    if ((p % 2 == 0) && (idx % 2 == 1)) return;
    if ((p % 2 == 1) && (idx % 2 == 0)) return;
    if (array[idx] > array[idx + 1]) {
        int tmp = array[idx + 1];
        array[idx + 1] = array[idx];
        array[idx] = tmp;
    }
}
bd1d4e206b2546311f1a40d741998ee19fe36ecd.cu
#include "includes.h" __global__ void brickSort(int* array, int arrayLen, int p) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= arrayLen - 1) return; if ((p % 2 == 0) && (idx % 2 == 1)) return; if ((p % 2 == 1) && (idx % 2 == 0)) return; if (array[idx] > array[idx + 1]) { int tmp = array[idx + 1]; array[idx + 1] = array[idx]; array[idx] = tmp; } }
3c270a9eabe1de96c29ac477d6185e7bacb1feeb.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #define CHECK(call) \ { \ hipError_t err = call; \ if (err != hipSuccess) \ { \ fprintf(stderr, "Failed with error code %s\n", hipGetErrorString(err)); \ exit(EXIT_FAILURE); \ } \ } __device__ __constant__ float d_Filter[3 * 3]; __global__ void convolute(float *d_i, float *d_o, int dim, int block_dim) { int tile_dim = block_dim + 2; extern __shared__ float sData[]; int n_subs = (tile_dim + blockDim.y - 1) / blockDim.y; int tid_x = blockIdx.x * block_dim + threadIdx.x; int i; int t_e_col = (blockIdx.x * block_dim + tile_dim < dim)? (blockIdx.x * block_dim + tile_dim) : dim; int t_e_row = (blockIdx.y * block_dim + tile_dim < dim)? (blockIdx.y * block_dim + tile_dim) : dim; int t_row; int tid_y; for (i = 0; i < n_subs; ++i) { t_row = threadIdx.y + i * blockDim.y; tid_y = blockIdx.y * block_dim + t_row; if (tid_x < t_e_col && tid_y < t_e_row) { sData[t_row * tile_dim + threadIdx.x] = d_i[tid_y * dim + tid_x]; } } __syncthreads(); for (int sub = 0; sub < n_subs; ++sub) { t_row = threadIdx.y + sub * blockDim.y; tid_y = blockIdx.y * block_dim + t_row; if (tid_x >= blockIdx.x * block_dim + 1 && tid_x < t_e_col - 1 && tid_y >= blockIdx.y * block_dim + 1 && tid_y < t_e_row - 1) { d_o[(tid_y - 1) * (dim - 2) + tid_x - 1] = 0.0; for (int i = -1; i <= 1; ++i) { for (int j = -1; j <= 1; ++j) { d_o[(tid_y - 1) * (dim - 2) + tid_x - 1] += sData[t_row * tile_dim + threadIdx.x + i * tile_dim + j] * d_Filter[(i + 1) * 3 + (j + 1)]; } } } } } int main() { int t; int i; float *h_f; h_f = (float *) malloc(9 * sizeof(float)); for (i = 0; i < 9; ++i) { h_f[i] = 1.0 / 9.0; } CHECK(hipMemcpyToSymbol(d_Filter, h_f, 9 * sizeof(float), 0, hipMemcpyHostToDevice)); scanf("%d", &t); while (t--) { float *h_i, *d_i; float *h_o, *d_o; int n; scanf("%d", &n); h_i = (float *) malloc((n + 2) * (n + 2) * sizeof(float)); h_o = (float *) malloc(n * n * sizeof(float)); for (i = 0; i < (n + 2) * (n + 2); ++i) { h_i[i] = 0; } for (i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { scanf("%f", &h_i[(i + 1) * (n + 2) + j + 1]); } } CHECK(hipMalloc((void **) &d_i, (n + 2) * (n + 2) * sizeof(float))); CHECK(hipMalloc((void **) &d_o, n * n * sizeof(float))); CHECK(hipMemcpy(d_i, h_i, (n + 2) * (n + 2) * sizeof(float), hipMemcpyHostToDevice)); int block_dim = 32; int tile_dim = block_dim + 2; dim3 grid((n + block_dim - 1) / block_dim, (n + block_dim - 1) / block_dim, 1); dim3 block((block_dim + 2), 8, 1); int shared_size = tile_dim * tile_dim * sizeof(float); hipLaunchKernelGGL(( convolute), dim3(grid), dim3(block), shared_size, 0, d_i, d_o, n + 2, block_dim); CHECK(hipGetLastError()); CHECK(hipMemcpy(h_o, d_o, n * n * sizeof(float), hipMemcpyDeviceToHost)); /* for (i = 0; i < (n + 2) * (n + 2); ++i) { printf("%4.2f ", h_i[i]); if (i % (n + 2) == n + 1) { printf("\n"); } } */ for (i = 0; i < n * n; ++i) { printf("%4.2f ", h_o[i]); if (i % n == n - 1) { printf("\n"); } } printf("\n"); } }
3c270a9eabe1de96c29ac477d6185e7bacb1feeb.cu
#include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <stdlib.h> #define CHECK(call) \ { \ cudaError_t err = call; \ if (err != cudaSuccess) \ { \ fprintf(stderr, "Failed with error code %s\n", cudaGetErrorString(err)); \ exit(EXIT_FAILURE); \ } \ } __device__ __constant__ float d_Filter[3 * 3]; __global__ void convolute(float *d_i, float *d_o, int dim, int block_dim) { int tile_dim = block_dim + 2; extern __shared__ float sData[]; int n_subs = (tile_dim + blockDim.y - 1) / blockDim.y; int tid_x = blockIdx.x * block_dim + threadIdx.x; int i; int t_e_col = (blockIdx.x * block_dim + tile_dim < dim)? (blockIdx.x * block_dim + tile_dim) : dim; int t_e_row = (blockIdx.y * block_dim + tile_dim < dim)? (blockIdx.y * block_dim + tile_dim) : dim; int t_row; int tid_y; for (i = 0; i < n_subs; ++i) { t_row = threadIdx.y + i * blockDim.y; tid_y = blockIdx.y * block_dim + t_row; if (tid_x < t_e_col && tid_y < t_e_row) { sData[t_row * tile_dim + threadIdx.x] = d_i[tid_y * dim + tid_x]; } } __syncthreads(); for (int sub = 0; sub < n_subs; ++sub) { t_row = threadIdx.y + sub * blockDim.y; tid_y = blockIdx.y * block_dim + t_row; if (tid_x >= blockIdx.x * block_dim + 1 && tid_x < t_e_col - 1 && tid_y >= blockIdx.y * block_dim + 1 && tid_y < t_e_row - 1) { d_o[(tid_y - 1) * (dim - 2) + tid_x - 1] = 0.0; for (int i = -1; i <= 1; ++i) { for (int j = -1; j <= 1; ++j) { d_o[(tid_y - 1) * (dim - 2) + tid_x - 1] += sData[t_row * tile_dim + threadIdx.x + i * tile_dim + j] * d_Filter[(i + 1) * 3 + (j + 1)]; } } } } } int main() { int t; int i; float *h_f; h_f = (float *) malloc(9 * sizeof(float)); for (i = 0; i < 9; ++i) { h_f[i] = 1.0 / 9.0; } CHECK(cudaMemcpyToSymbol(d_Filter, h_f, 9 * sizeof(float), 0, cudaMemcpyHostToDevice)); scanf("%d", &t); while (t--) { float *h_i, *d_i; float *h_o, *d_o; int n; scanf("%d", &n); h_i = (float *) malloc((n + 2) * (n + 2) * sizeof(float)); h_o = (float *) malloc(n * n * sizeof(float)); for (i = 0; i < (n + 2) * (n + 2); ++i) { h_i[i] = 0; } for (i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { scanf("%f", &h_i[(i + 1) * (n + 2) + j + 1]); } } CHECK(cudaMalloc((void **) &d_i, (n + 2) * (n + 2) * sizeof(float))); CHECK(cudaMalloc((void **) &d_o, n * n * sizeof(float))); CHECK(cudaMemcpy(d_i, h_i, (n + 2) * (n + 2) * sizeof(float), cudaMemcpyHostToDevice)); int block_dim = 32; int tile_dim = block_dim + 2; dim3 grid((n + block_dim - 1) / block_dim, (n + block_dim - 1) / block_dim, 1); dim3 block((block_dim + 2), 8, 1); int shared_size = tile_dim * tile_dim * sizeof(float); convolute<<<grid, block, shared_size>>>(d_i, d_o, n + 2, block_dim); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(h_o, d_o, n * n * sizeof(float), cudaMemcpyDeviceToHost)); /* for (i = 0; i < (n + 2) * (n + 2); ++i) { printf("%4.2f ", h_i[i]); if (i % (n + 2) == n + 1) { printf("\n"); } } */ for (i = 0; i < n * n; ++i) { printf("%4.2f ", h_o[i]); if (i % n == n - 1) { printf("\n"); } } printf("\n"); } }
61af602d9e7d49dbdde3ae35339a11201b0bec20.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kernel(float *id, float *od, int w, int h, int depth) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; const int dataTotalSize = w * h * depth; const int radius = 2; const int filter_size = 2*radius + 1; const int sW = 6; /* sW == 2 * filter_radius + blockDim.x (or same as 2 * filter_radius + blockDim.y) */ /* boarder do not concerned */ if(x >= w || y >= h || z >= depth) return; else { //global defined int idx = z*w*h+y*w+x; //3d grid(blocks) 2d block(threads) int threadsPerBlock = blockDim.x * blockDim.y; int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = (blockId * threadsPerBlock) + (threadIdx.y * blockDim.x) + threadIdx.x; int g_Idx = threadId; //2d shared memory working __shared__ unsigned char smem[sW][sW]; int s_Idx = threadIdx.x + (threadIdx.y * sW); int s_IdxY = s_Idx / sW; int s_IdxX = s_Idx % sW; //Here: definition error, need edit, haven't finished yet. //int g_IdxY = s_IdxY + (blockIdx.y * blockDim.y); //int g_IdxX = s_IdxX + (blockIdx.x * blockDim.x); //int g_Idx = g_IdxX + (g_IdxY * w); //32 threads working together per warp if(s_IdxY < sW && s_IdxX < sW) //Here: boarder concerned error, need edit { if(x >= 0 && y < w && y >= 0 && y < h && z >= 0 && z < depth ) //Here: boarder concerned error, need edit smem[s_IdxY][s_IdxX] = id[g_Idx]; else smem[s_IdxY][s_IdxX] = 0; __syncthreads(); } /*compute the sum using shared memory*/ float avg = 0.0; for (int i = -radius; i <= radius; i++){ if(s_IdxY + i < 0 /*|| g_IdxY > h*/ ) //Here: boarder concerned error, need edit avg += 0.0; else avg += smem[s_IdxY+i][s_IdxX]; } /*register to global, by now thread*/ avg /= filter_size; if(idx < dataTotalSize) od[idx] = avg; } }
61af602d9e7d49dbdde3ae35339a11201b0bec20.cu
#include "includes.h" __global__ void kernel(float *id, float *od, int w, int h, int depth) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int z = blockIdx.z * blockDim.z + threadIdx.z; const int dataTotalSize = w * h * depth; const int radius = 2; const int filter_size = 2*radius + 1; const int sW = 6; /* sW == 2 * filter_radius + blockDim.x (or same as 2 * filter_radius + blockDim.y) */ /* boarder do not concerned */ if(x >= w || y >= h || z >= depth) return; else { //global defined int idx = z*w*h+y*w+x; //3d grid(blocks) 2d block(threads) int threadsPerBlock = blockDim.x * blockDim.y; int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int threadId = (blockId * threadsPerBlock) + (threadIdx.y * blockDim.x) + threadIdx.x; int g_Idx = threadId; //2d shared memory working __shared__ unsigned char smem[sW][sW]; int s_Idx = threadIdx.x + (threadIdx.y * sW); int s_IdxY = s_Idx / sW; int s_IdxX = s_Idx % sW; //Here: definition error, need edit, haven't finished yet. //int g_IdxY = s_IdxY + (blockIdx.y * blockDim.y); //int g_IdxX = s_IdxX + (blockIdx.x * blockDim.x); //int g_Idx = g_IdxX + (g_IdxY * w); //32 threads working together per warp if(s_IdxY < sW && s_IdxX < sW) //Here: boarder concerned error, need edit { if(x >= 0 && y < w && y >= 0 && y < h && z >= 0 && z < depth ) //Here: boarder concerned error, need edit smem[s_IdxY][s_IdxX] = id[g_Idx]; else smem[s_IdxY][s_IdxX] = 0; __syncthreads(); } /*compute the sum using shared memory*/ float avg = 0.0; for (int i = -radius; i <= radius; i++){ if(s_IdxY + i < 0 /*|| g_IdxY > h*/ ) //Here: boarder concerned error, need edit avg += 0.0; else avg += smem[s_IdxY+i][s_IdxX]; } /*register to global, by now thread*/ avg /= filter_size; if(idx < dataTotalSize) od[idx] = avg; } }
df5b5c546c3f9fb1cc0883ee7fe81536cfa1b3f4.hip
// !!! This is a file automatically generated by hipify!!! #include "../../include/layers/dropout_layer.h" #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include "../../include/util/math_function_ptr.h" template<typename dtype> __global__ void dropout_forward_backward(const unsigned int threshold, const dtype scale, const int size, const unsigned int* mask, const dtype* input, dtype* output) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { output[index] = input[index] * (mask[index] >= threshold) * scale; } } namespace BigBang { template<typename dtype> void DropoutLayer<dtype>::Forward_GPU(const Tensor<dtype>* bottom, Tensor<dtype>* top) { const int size = bottom->size(); unsigned int* mask_data = mask_->mutable_gpu_data(); bigbang_gpu_random_uniform(size, mask_data); const dtype* bottom_data = bottom->gpu_data(); dtype* top_data = top->mutable_gpu_data(); hipLaunchKernelGGL(( dropout_forward_backward), dim3(BigBangGetBlocks(size)), dim3(THREAD_MAX_NUMS) , 0, 0, threshold_, scale_, size, mask_data, bottom_data, top_data); } template<typename dtype> void DropoutLayer<dtype>::Backward_GPU(const Tensor<dtype>* top, Tensor<dtype>* bottom) { const int size = top->size(); const unsigned int* mask_data = mask_->gpu_data(); const dtype* top_diff_data = top->gpu_diff_data(); dtype* bottom_diff_data = bottom->mutable_gpu_diff_data(); hipLaunchKernelGGL(( dropout_forward_backward), dim3(BigBangGetBlocks(size)), dim3(THREAD_MAX_NUMS) , 0, 0, threshold_, scale_, size, mask_data, top_diff_data, bottom_diff_data); } INSTANTIATE_CLASS_GPU_FUNCTION(DropoutLayer); }
df5b5c546c3f9fb1cc0883ee7fe81536cfa1b3f4.cu
#include "../../include/layers/dropout_layer.h" #include <cuda_runtime.h> #include <device_launch_parameters.h> #include "../../include/util/math_function_ptr.h" template<typename dtype> __global__ void dropout_forward_backward(const unsigned int threshold, const dtype scale, const int size, const unsigned int* mask, const dtype* input, dtype* output) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < size) { output[index] = input[index] * (mask[index] >= threshold) * scale; } } namespace BigBang { template<typename dtype> void DropoutLayer<dtype>::Forward_GPU(const Tensor<dtype>* bottom, Tensor<dtype>* top) { const int size = bottom->size(); unsigned int* mask_data = mask_->mutable_gpu_data(); bigbang_gpu_random_uniform(size, mask_data); const dtype* bottom_data = bottom->gpu_data(); dtype* top_data = top->mutable_gpu_data(); dropout_forward_backward<<<BigBangGetBlocks(size), THREAD_MAX_NUMS >>>(threshold_, scale_, size, mask_data, bottom_data, top_data); } template<typename dtype> void DropoutLayer<dtype>::Backward_GPU(const Tensor<dtype>* top, Tensor<dtype>* bottom) { const int size = top->size(); const unsigned int* mask_data = mask_->gpu_data(); const dtype* top_diff_data = top->gpu_diff_data(); dtype* bottom_diff_data = bottom->mutable_gpu_diff_data(); dropout_forward_backward<<<BigBangGetBlocks(size), THREAD_MAX_NUMS >>>(threshold_, scale_, size, mask_data, top_diff_data, bottom_diff_data); } INSTANTIATE_CLASS_GPU_FUNCTION(DropoutLayer); }
bdc99e4e303b75cd5ba7872fd131601f91be5d87.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<math.h>

#define NUM 10000000

#define CUDA_ERROR_EXIT(str) do{\
    hipError_t err = hipGetLastError();\
    if( err != hipSuccess){\
        printf("Cuda Error: '%s' for %s\n", hipGetErrorString(err), str);\
        exit(-1);\
    }\
}while(0);

#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))

/* Bitwise XOR of two 64-bit words. The original accumulated into a 32-bit
   unsigned int and shifted a plain int by up to 63 bits, which truncated the
   upper half of the result and relied on undefined behaviour; it now operates
   on unsigned long throughout. */
__device__ unsigned long exor(unsigned long a, unsigned long b)
{
    unsigned long res = 0;
    for (int i = 63; i >= 0; i--) {
        // Find current bits in a and b
        bool b1 = a & (1UL << i);
        bool b2 = b & (1UL << i);
        // If both are 1 then 0, else xor is the same as OR
        bool xoredBit = (b1 & b2) ? 0 : (b1 | b2);
        // Update result
        res <<= 1;
        res |= xoredBit;
    }
    return res;
}

__global__ void calculate(unsigned long *mem, int num, int iter)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= num)
        return;
    if (i < num / 2) {
        int tmp = i * 2;
        /* Only elements whose index is a multiple of 2*iter combine in this
           pass; without this restriction one thread could read mem[tmp + iter]
           while another thread is overwriting it (a data race). */
        if (tmp % (2 * iter) == 0 && tmp + iter < num)
            mem[tmp] = exor(mem[tmp], mem[tmp + iter]);
    }
}

int main(int argc, char **argv)
{
    struct timeval start, end, t_start, t_end;
    int i, blocks = 0;
    unsigned long *p1, *g1;
    unsigned long seed, num;

    if (argc == 3) {
        num = atoi(argv[1]);
        /* Update after checking */
        if (num <= 0)
            num = NUM;
        seed = atoi(argv[2]);
    }

    p1 = (unsigned long *)malloc((num + 1) * sizeof(unsigned long));
    srand(seed);
    for (i = 0; i < num; ++i) {
        p1[i] = random();
    }
    p1[i] = 0;

    gettimeofday(&t_start, NULL);

    hipMalloc(&g1, (num + 1) * sizeof(unsigned long));
    CUDA_ERROR_EXIT("hipMalloc");
    hipMemcpy(g1, p1, (num + 1) * sizeof(unsigned long), hipMemcpyHostToDevice);
    CUDA_ERROR_EXIT("hipMemcpy");

    gettimeofday(&start, NULL);
    blocks = num / 1024;
    if (num % 1024)
        ++blocks;
    for (i = 0; i < log(num) / log(2); i++) {
        hipLaunchKernelGGL((calculate), dim3(blocks), dim3(1024), 0, 0, g1, num, (int)pow(2, i));
    }
    CUDA_ERROR_EXIT("kernel invocation");
    gettimeofday(&end, NULL);

    /* Copy back result */
    hipMemcpy(p1, g1, (num + 1) * sizeof(unsigned long), hipMemcpyDeviceToHost);
    CUDA_ERROR_EXIT("memcpy");
    gettimeofday(&t_end, NULL);

    printf("%lu\n", p1[0]);
    printf("Total time = %ld microsecs Processing = %ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
    hipFree(g1);
    /* The reduction leaves the final XOR value in element 0 */
    printf("The XOR final value is %lu\n", p1[0]);
    free(p1);
    return 0;
}
bdc99e4e303b75cd5ba7872fd131601f91be5d87.cu
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<math.h>

#define NUM 10000000

#define CUDA_ERROR_EXIT(str) do{\
    cudaError err = cudaGetLastError();\
    if( err != cudaSuccess){\
        printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
        exit(-1);\
    }\
}while(0);

#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))

/* Bitwise XOR of two 64-bit words. The original accumulated into a 32-bit
   unsigned int and shifted a plain int by up to 63 bits, which truncated the
   upper half of the result and relied on undefined behaviour; it now operates
   on unsigned long throughout. */
__device__ unsigned long exor(unsigned long a, unsigned long b)
{
    unsigned long res = 0;
    for (int i = 63; i >= 0; i--) {
        // Find current bits in a and b
        bool b1 = a & (1UL << i);
        bool b2 = b & (1UL << i);
        // If both are 1 then 0, else xor is the same as OR
        bool xoredBit = (b1 & b2) ? 0 : (b1 | b2);
        // Update result
        res <<= 1;
        res |= xoredBit;
    }
    return res;
}

__global__ void calculate(unsigned long *mem, int num, int iter)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= num)
        return;
    if (i < num / 2) {
        int tmp = i * 2;
        /* Only elements whose index is a multiple of 2*iter combine in this
           pass; without this restriction one thread could read mem[tmp + iter]
           while another thread is overwriting it (a data race). */
        if (tmp % (2 * iter) == 0 && tmp + iter < num)
            mem[tmp] = exor(mem[tmp], mem[tmp + iter]);
    }
}

int main(int argc, char **argv)
{
    struct timeval start, end, t_start, t_end;
    int i, blocks = 0;
    unsigned long *p1, *g1;
    unsigned long seed, num;

    if (argc == 3) {
        num = atoi(argv[1]);
        /* Update after checking */
        if (num <= 0)
            num = NUM;
        seed = atoi(argv[2]);
    }

    p1 = (unsigned long *)malloc((num + 1) * sizeof(unsigned long));
    srand(seed);
    for (i = 0; i < num; ++i) {
        p1[i] = random();
    }
    p1[i] = 0;

    gettimeofday(&t_start, NULL);

    cudaMalloc(&g1, (num + 1) * sizeof(unsigned long));
    CUDA_ERROR_EXIT("cudaMalloc");
    cudaMemcpy(g1, p1, (num + 1) * sizeof(unsigned long), cudaMemcpyHostToDevice);
    CUDA_ERROR_EXIT("cudaMemcpy");

    gettimeofday(&start, NULL);
    blocks = num / 1024;
    if (num % 1024)
        ++blocks;
    for (i = 0; i < log(num) / log(2); i++) {
        calculate<<<blocks, 1024>>>(g1, num, (int)pow(2, i));
    }
    CUDA_ERROR_EXIT("kernel invocation");
    gettimeofday(&end, NULL);

    /* Copy back result */
    cudaMemcpy(p1, g1, (num + 1) * sizeof(unsigned long), cudaMemcpyDeviceToHost);
    CUDA_ERROR_EXIT("memcpy");
    gettimeofday(&t_end, NULL);

    printf("%lu\n", p1[0]);
    printf("Total time = %ld microsecs Processing = %ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
    cudaFree(g1);
    /* The reduction leaves the final XOR value in element 0 */
    printf("The XOR final value is %lu\n", p1[0]);
    free(p1);
    return 0;
}
a4c5f41f573f306e836319530fea5e67fd0c192d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #define PI 3.1415926535897932384626433832795029f #define PIx2 6.2831853071795864769252867665590058f #define MIN(X,Y) ((X) < (Y) ? (X) : (Y)) #define K_ELEMS_PER_GRID 2048 #define PROJECT_DEF 1 struct kValues { float Kx; float Ky; float Kz; float PhiMag; }; #if PROJECT_DEF #define BLOCK_SIZE 512 #define K_VAL_GRID_SIZE (BLOCK_SIZE * 4) __constant__ __device__ kValues const_kValues[K_VAL_GRID_SIZE]; __global__ void ComputePhiMagKernel(int numK, float *phiR, float *phiI, float *phiMag) { unsigned int t = threadIdx.x + (blockIdx.x * blockDim.x); if (t < numK) phiMag[t] = (phiR[t] * phiR[t]) + (phiI[t] * phiI[t]); } __global__ void ComputePhiMagKernelAsync(int numK, float *phiR, float *phiI, float *phiMag, int offset) { unsigned int t = offset + threadIdx.x + (blockIdx.x * blockDim.x); if (t < numK) { phiMag[t] = (phiR[t] * phiR[t]) + (phiI[t] * phiI[t]); } } __global__ void ComputeQKernel(int numK, int numX, float *x_d, float *y_d, float *z_d, float *Qr_d, float *Qi_d) { unsigned int t = threadIdx.x + (blockIdx.x * blockDim.x); if (t >= numX) return; float x_l = x_d[t]; float y_l = y_d[t]; float z_l = z_d[t]; float Qracc = 0.0f; float Qiacc = 0.0f; float phi = 0.0f; float expArg; int idx = 0; if (numK % 2) { /* if numK is odd */ expArg = PIx2 * (const_kValues[idx].Kx * x_l + const_kValues[idx].Ky * y_l + const_kValues[idx].Kz * z_l); phi = const_kValues[idx].PhiMag; Qracc += phi * cos(expArg); Qiacc += phi * sin(expArg); idx++; } for (; idx < numK; idx++) { /* using thread coarsening technique */ expArg = PIx2 * (const_kValues[idx].Kx * x_l + const_kValues[idx].Ky * y_l + const_kValues[idx].Kz * z_l); phi = const_kValues[idx].PhiMag; Qracc += phi * cos(expArg); Qiacc += phi * sin(expArg); idx++; expArg = PIx2 * (const_kValues[idx].Kx * x_l + const_kValues[idx].Ky * y_l + const_kValues[idx].Kz * z_l); phi = const_kValues[idx].PhiMag; Qracc += phi * cos(expArg); Qiacc += phi * sin(expArg); } Qr_d[t] += Qracc; Qi_d[t] += Qiacc; } __global__ void ComputeQKernelAsync(int numK, int numX, float *x_d, float *y_d, float *z_d, float *Qr_d, float *Qi_d, int offset) { unsigned int t = threadIdx.x + (blockIdx.x * blockDim.x); if (t >= numX) return; float x_l = x_d[t + offset]; float y_l = y_d[t + offset]; float z_l = z_d[t + offset]; float Qracc = 0.0f; float Qiacc = 0.0f; float phi = 0.0f; float expArg; int idx = 0; if (numK % 2) { /* if numK is odd */ expArg = PIx2 * (const_kValues[idx].Kx * x_l + const_kValues[idx].Ky * y_l + const_kValues[idx].Kz * z_l); phi = const_kValues[idx].PhiMag; Qracc += phi * cos(expArg); Qiacc += phi * sin(expArg); idx++; } for (; idx < numK; idx++) { /* using thread coarsening technique */ expArg = PIx2 * (const_kValues[idx].Kx * x_l + const_kValues[idx].Ky * y_l + const_kValues[idx].Kz * z_l); phi = const_kValues[idx].PhiMag; Qracc += phi * cos(expArg); Qiacc += phi * sin(expArg); idx++; expArg = PIx2 * (const_kValues[idx].Kx * x_l + const_kValues[idx].Ky * y_l + const_kValues[idx].Kz * z_l); phi = const_kValues[idx].PhiMag; Qracc += phi * cos(expArg); Qiacc += phi * sin(expArg); } Qr_d[t + offset] += Qracc; Qi_d[t + offset] += Qiacc; } void ComputePhiMagGPU(int numK, float* phiR_d, float* phiI_d, float* phiMag_d) { unsigned 
int numBlocks = ((numK - 1) / BLOCK_SIZE) + 1; dim3 dimGrid(numBlocks, 1, 1); dim3 dimBlock(BLOCK_SIZE, 1, 1); hipLaunchKernelGGL(( ComputePhiMagKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, numK, phiR_d, phiI_d, phiMag_d); } void ComputePhiMagGPUAsync(int numK, int streamSz, float* phiR_d, float* phiI_d, float* phiMag_d, hipStream_t stream, int offset) { unsigned int numBlocks = ((streamSz - 1) / BLOCK_SIZE) + 1; dim3 dimGrid(numBlocks, 1, 1); dim3 dimBlock(BLOCK_SIZE, 1, 1); hipLaunchKernelGGL(( ComputePhiMagKernelAsync), dim3(dimGrid), dim3(dimBlock), 0, stream, numK, phiR_d, phiI_d, phiMag_d, offset); } void ComputeQGPU(int numK, int numX, struct kValues *kVals, float *x_d, float *y_d, float *z_d, float *Qr_d, float *Qi_d) { unsigned int kV_size_to_cover = K_VAL_GRID_SIZE; unsigned int n_iter = ((numK - 1) / K_VAL_GRID_SIZE) + 1; struct kValues *kV_ptr = kVals; unsigned int numBlocks = ((numX - 1) / BLOCK_SIZE) + 1; dim3 dimGrid(numBlocks, 1, 1); dim3 dimBlock(BLOCK_SIZE, 1, 1); for (int iter = 0; iter < n_iter; iter++) { kV_size_to_cover = MIN(K_VAL_GRID_SIZE, numK - (iter * K_VAL_GRID_SIZE)); if (kV_size_to_cover) { hipMemcpyToSymbol(const_kValues, kV_ptr, kV_size_to_cover * sizeof(struct kValues), 0); hipLaunchKernelGGL(( ComputeQKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, kV_size_to_cover, numX, x_d, y_d, z_d, Qr_d, Qi_d); hipDeviceSynchronize(); } kV_ptr += kV_size_to_cover; } } void ComputeQGPUAsync(int numK, int streamSz, struct kValues *kVals, float *x_d, float *y_d, float *z_d, float *Qr_d, float *Qi_d, hipStream_t stream, int offset) { unsigned int kV_size_to_cover = K_VAL_GRID_SIZE; unsigned int n_iter = ((numK - 1) / K_VAL_GRID_SIZE) + 1; struct kValues *kV_ptr = kVals; unsigned int numBlocks = ((streamSz - 1) / BLOCK_SIZE) + 1; dim3 dimGrid(numBlocks, 1, 1); dim3 dimBlock(BLOCK_SIZE, 1, 1); for (int iter = 0; iter < n_iter; iter++) { kV_size_to_cover = MIN(K_VAL_GRID_SIZE, numK - (iter * K_VAL_GRID_SIZE)); if (kV_size_to_cover) { hipMemcpyToSymbol(const_kValues, kV_ptr, kV_size_to_cover * sizeof(struct kValues), 0); hipLaunchKernelGGL(( ComputeQKernelAsync), dim3(dimGrid), dim3(dimBlock), 0, stream, kV_size_to_cover, streamSz, x_d, y_d, z_d, Qr_d, Qi_d, offset); hipDeviceSynchronize(); } kV_ptr += kV_size_to_cover; } } #else inline void ComputePhiMagCPU(int numK, float* phiR, float* phiI, float* __restrict__ phiMag) { int indexK = 0; for (indexK = 0; indexK < numK; indexK++) { float real = phiR[indexK]; float imag = phiI[indexK]; phiMag[indexK] = real*real + imag*imag; } } inline void ComputeQCPU(int numK, int numX, struct kValues *kVals, float* x, float* y, float* z, float *__restrict__ Qr, float *__restrict__ Qi) { float expArg; float cosArg; float sinArg; int indexK, indexX; // Loop over the space and frequency domains. // Generally, numX > numK. // Since loops are not tiled, it's better that the loop with the smaller // cache footprint be innermost. 
for (indexX = 0; indexX < numX; indexX++) { // Sum the contributions to this point over all frequencies float Qracc = 0.0f; float Qiacc = 0.0f; for (indexK = 0; indexK < numK; indexK++) { expArg = PIx2 * (kVals[indexK].Kx * x[indexX] + kVals[indexK].Ky * y[indexX] + kVals[indexK].Kz * z[indexX]); cosArg = cosf(expArg); sinArg = sinf(expArg); float phi = kVals[indexK].PhiMag; Qracc += phi * cosArg; Qiacc += phi * sinArg; } Qr[indexX] = Qracc; Qi[indexX] = Qiacc; } } #endif void createDataStructsCPU(int numK, int numX, float** phiMag, float** Qr, float** Qi) { *phiMag = (float* ) memalign(16, numK * sizeof(float)); *Qr = (float*) memalign(16, numX * sizeof (float)); memset((void *)*Qr, 0, numX * sizeof(float)); *Qi = (float*) memalign(16, numX * sizeof (float)); memset((void *)*Qi, 0, numX * sizeof(float)); }
a4c5f41f573f306e836319530fea5e67fd0c192d.cu
/*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #define PI 3.1415926535897932384626433832795029f #define PIx2 6.2831853071795864769252867665590058f #define MIN(X,Y) ((X) < (Y) ? (X) : (Y)) #define K_ELEMS_PER_GRID 2048 #define PROJECT_DEF 1 struct kValues { float Kx; float Ky; float Kz; float PhiMag; }; #if PROJECT_DEF #define BLOCK_SIZE 512 #define K_VAL_GRID_SIZE (BLOCK_SIZE * 4) __constant__ __device__ kValues const_kValues[K_VAL_GRID_SIZE]; __global__ void ComputePhiMagKernel(int numK, float *phiR, float *phiI, float *phiMag) { unsigned int t = threadIdx.x + (blockIdx.x * blockDim.x); if (t < numK) phiMag[t] = (phiR[t] * phiR[t]) + (phiI[t] * phiI[t]); } __global__ void ComputePhiMagKernelAsync(int numK, float *phiR, float *phiI, float *phiMag, int offset) { unsigned int t = offset + threadIdx.x + (blockIdx.x * blockDim.x); if (t < numK) { phiMag[t] = (phiR[t] * phiR[t]) + (phiI[t] * phiI[t]); } } __global__ void ComputeQKernel(int numK, int numX, float *x_d, float *y_d, float *z_d, float *Qr_d, float *Qi_d) { unsigned int t = threadIdx.x + (blockIdx.x * blockDim.x); if (t >= numX) return; float x_l = x_d[t]; float y_l = y_d[t]; float z_l = z_d[t]; float Qracc = 0.0f; float Qiacc = 0.0f; float phi = 0.0f; float expArg; int idx = 0; if (numK % 2) { /* if numK is odd */ expArg = PIx2 * (const_kValues[idx].Kx * x_l + const_kValues[idx].Ky * y_l + const_kValues[idx].Kz * z_l); phi = const_kValues[idx].PhiMag; Qracc += phi * cos(expArg); Qiacc += phi * sin(expArg); idx++; } for (; idx < numK; idx++) { /* using thread coarsening technique */ expArg = PIx2 * (const_kValues[idx].Kx * x_l + const_kValues[idx].Ky * y_l + const_kValues[idx].Kz * z_l); phi = const_kValues[idx].PhiMag; Qracc += phi * cos(expArg); Qiacc += phi * sin(expArg); idx++; expArg = PIx2 * (const_kValues[idx].Kx * x_l + const_kValues[idx].Ky * y_l + const_kValues[idx].Kz * z_l); phi = const_kValues[idx].PhiMag; Qracc += phi * cos(expArg); Qiacc += phi * sin(expArg); } Qr_d[t] += Qracc; Qi_d[t] += Qiacc; } __global__ void ComputeQKernelAsync(int numK, int numX, float *x_d, float *y_d, float *z_d, float *Qr_d, float *Qi_d, int offset) { unsigned int t = threadIdx.x + (blockIdx.x * blockDim.x); if (t >= numX) return; float x_l = x_d[t + offset]; float y_l = y_d[t + offset]; float z_l = z_d[t + offset]; float Qracc = 0.0f; float Qiacc = 0.0f; float phi = 0.0f; float expArg; int idx = 0; if (numK % 2) { /* if numK is odd */ expArg = PIx2 * (const_kValues[idx].Kx * x_l + const_kValues[idx].Ky * y_l + const_kValues[idx].Kz * z_l); phi = const_kValues[idx].PhiMag; Qracc += phi * cos(expArg); Qiacc += phi * sin(expArg); idx++; } for (; idx < numK; idx++) { /* using thread coarsening technique */ expArg = PIx2 * (const_kValues[idx].Kx * x_l + const_kValues[idx].Ky * y_l + const_kValues[idx].Kz * z_l); phi = const_kValues[idx].PhiMag; Qracc += phi * cos(expArg); Qiacc += phi * sin(expArg); idx++; expArg = PIx2 * (const_kValues[idx].Kx * x_l + const_kValues[idx].Ky * y_l + const_kValues[idx].Kz * z_l); phi = const_kValues[idx].PhiMag; Qracc += phi * cos(expArg); Qiacc += phi * sin(expArg); } Qr_d[t + offset] += Qracc; Qi_d[t + offset] += Qiacc; } void ComputePhiMagGPU(int numK, float* phiR_d, float* phiI_d, float* phiMag_d) { unsigned int numBlocks = ((numK - 1) / BLOCK_SIZE) + 1; dim3 dimGrid(numBlocks, 1, 1); dim3 
dimBlock(BLOCK_SIZE, 1, 1); ComputePhiMagKernel<<<dimGrid, dimBlock>>>(numK, phiR_d, phiI_d, phiMag_d); } void ComputePhiMagGPUAsync(int numK, int streamSz, float* phiR_d, float* phiI_d, float* phiMag_d, cudaStream_t stream, int offset) { unsigned int numBlocks = ((streamSz - 1) / BLOCK_SIZE) + 1; dim3 dimGrid(numBlocks, 1, 1); dim3 dimBlock(BLOCK_SIZE, 1, 1); ComputePhiMagKernelAsync<<<dimGrid, dimBlock, 0, stream>>>(numK, phiR_d, phiI_d, phiMag_d, offset); } void ComputeQGPU(int numK, int numX, struct kValues *kVals, float *x_d, float *y_d, float *z_d, float *Qr_d, float *Qi_d) { unsigned int kV_size_to_cover = K_VAL_GRID_SIZE; unsigned int n_iter = ((numK - 1) / K_VAL_GRID_SIZE) + 1; struct kValues *kV_ptr = kVals; unsigned int numBlocks = ((numX - 1) / BLOCK_SIZE) + 1; dim3 dimGrid(numBlocks, 1, 1); dim3 dimBlock(BLOCK_SIZE, 1, 1); for (int iter = 0; iter < n_iter; iter++) { kV_size_to_cover = MIN(K_VAL_GRID_SIZE, numK - (iter * K_VAL_GRID_SIZE)); if (kV_size_to_cover) { cudaMemcpyToSymbol(const_kValues, kV_ptr, kV_size_to_cover * sizeof(struct kValues), 0); ComputeQKernel<<<dimGrid, dimBlock>>>(kV_size_to_cover, numX, x_d, y_d, z_d, Qr_d, Qi_d); cudaDeviceSynchronize(); } kV_ptr += kV_size_to_cover; } } void ComputeQGPUAsync(int numK, int streamSz, struct kValues *kVals, float *x_d, float *y_d, float *z_d, float *Qr_d, float *Qi_d, cudaStream_t stream, int offset) { unsigned int kV_size_to_cover = K_VAL_GRID_SIZE; unsigned int n_iter = ((numK - 1) / K_VAL_GRID_SIZE) + 1; struct kValues *kV_ptr = kVals; unsigned int numBlocks = ((streamSz - 1) / BLOCK_SIZE) + 1; dim3 dimGrid(numBlocks, 1, 1); dim3 dimBlock(BLOCK_SIZE, 1, 1); for (int iter = 0; iter < n_iter; iter++) { kV_size_to_cover = MIN(K_VAL_GRID_SIZE, numK - (iter * K_VAL_GRID_SIZE)); if (kV_size_to_cover) { cudaMemcpyToSymbol(const_kValues, kV_ptr, kV_size_to_cover * sizeof(struct kValues), 0); ComputeQKernelAsync<<<dimGrid, dimBlock, 0, stream>>>(kV_size_to_cover, streamSz, x_d, y_d, z_d, Qr_d, Qi_d, offset); cudaDeviceSynchronize(); } kV_ptr += kV_size_to_cover; } } #else inline void ComputePhiMagCPU(int numK, float* phiR, float* phiI, float* __restrict__ phiMag) { int indexK = 0; for (indexK = 0; indexK < numK; indexK++) { float real = phiR[indexK]; float imag = phiI[indexK]; phiMag[indexK] = real*real + imag*imag; } } inline void ComputeQCPU(int numK, int numX, struct kValues *kVals, float* x, float* y, float* z, float *__restrict__ Qr, float *__restrict__ Qi) { float expArg; float cosArg; float sinArg; int indexK, indexX; // Loop over the space and frequency domains. // Generally, numX > numK. // Since loops are not tiled, it's better that the loop with the smaller // cache footprint be innermost. for (indexX = 0; indexX < numX; indexX++) { // Sum the contributions to this point over all frequencies float Qracc = 0.0f; float Qiacc = 0.0f; for (indexK = 0; indexK < numK; indexK++) { expArg = PIx2 * (kVals[indexK].Kx * x[indexX] + kVals[indexK].Ky * y[indexX] + kVals[indexK].Kz * z[indexX]); cosArg = cosf(expArg); sinArg = sinf(expArg); float phi = kVals[indexK].PhiMag; Qracc += phi * cosArg; Qiacc += phi * sinArg; } Qr[indexX] = Qracc; Qi[indexX] = Qiacc; } } #endif void createDataStructsCPU(int numK, int numX, float** phiMag, float** Qr, float** Qi) { *phiMag = (float* ) memalign(16, numK * sizeof(float)); *Qr = (float*) memalign(16, numX * sizeof (float)); memset((void *)*Qr, 0, numX * sizeof(float)); *Qi = (float*) memalign(16, numX * sizeof (float)); memset((void *)*Qi, 0, numX * sizeof(float)); }
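ComputeQGPU above streams the k-space data through a fixed-size __constant__ array, copying one K_VAL_GRID_SIZE chunk per kernel launch. Below is a minimal, hedged sketch of that chunking pattern; the names CHUNK, Params, process_chunk and run_chunked are hypothetical stand-ins, not part of the file above.

#include <cuda_runtime.h>
#include <vector>

#define CHUNK 2048                         // stand-in for K_VAL_GRID_SIZE

struct Params { float kx, ky, kz, phi; };  // stand-in for struct kValues
__constant__ Params c_params[CHUNK];       // fixed-size constant-memory window

__global__ void process_chunk(int m, float *out)
{
    int t = blockIdx.x * blockDim.x + threadIdx.x;
    float acc = 0.0f;
    for (int i = 0; i < m; ++i)            // every thread reads the same element: broadcast-friendly
        acc += c_params[i].phi;
    if (t == 0)
        out[0] += acc;                     // trivial use of the accumulator, just to keep the sketch short
}

void run_chunked(const std::vector<Params> &host, float *d_out)
{
    for (size_t off = 0; off < host.size(); off += CHUNK) {
        size_t m = host.size() - off;
        if (m > CHUNK) m = CHUNK;          // last chunk may be short, like kV_size_to_cover above
        cudaMemcpyToSymbol(c_params, host.data() + off, m * sizeof(Params));
        process_chunk<<<1, 256>>>((int)m, d_out);
        cudaDeviceSynchronize();           // per-chunk sync, as in ComputeQGPU above
    }
}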
191f5587b61b36d1a9c9bb49177c6f815569b203.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

extern "C" __global__ void add(int n, float *a, float *b, float *sum)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        sum[i] = a[i] + b[i];
    }
}
191f5587b61b36d1a9c9bb49177c6f815569b203.cu
#include "includes.h" extern "C" __global__ void add(int n, float *a, float *b, float *sum) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<n) { sum[i] = a[i] + b[i]; } }
110567560f38745d4df799e105c75e5458b3e334.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gpuops.h" #include <stdio.h> #include <time.h> #include <sys/time.h> #include <sys/resource.h> #define INT_M 2 const int N = 20 * (1 << 20); double get_time() { struct timeval t; struct timezone tzp; gettimeofday(&t, &tzp); return t.tv_sec + t.tv_usec*1e-6; } //float operations __global__ void add_float(int n, float a, float *x, float *y, float *z) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { y[i] = a*x[i] + y[i];//y=4 z[i] = a*x[i] + z[i];//z=5 z[i] = a*y[i] + z[i];//z=13 y[i] = a*x[i] + z[i];//y=15 y[i] = a*x[i] + y[i];//y=17 } } //int operations __global__ void add_int(int n, int a, int *x, int *y, int *z) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { y[i] = a*x[i] + y[i];//y=4 z[i] = a*x[i] + z[i];//z=5 z[i] = a*y[i] + z[i];//z=13 y[i] = a*x[i] + z[i];//y=15 y[i] = a*x[i] + y[i];//y=17 } } float speed_test_int(int n_blocks, int n_cores){ int *x, *y, *z, *d_x, *d_y, *d_z; x = (int*)malloc(N*sizeof(int)); y = (int*)malloc(N*sizeof(int)); z = (int*)malloc(N*sizeof(int)); hipMalloc(&d_x, N*sizeof(int)); hipMalloc(&d_y, N*sizeof(int)); hipMalloc(&d_z, N*sizeof(int)); for (int i = 0; i < N; i++) { x[i] = 1; y[i] = 2; z[i] = 3; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipMemcpy(d_x, x, N*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_y, y, N*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_z, z, N*sizeof(int), hipMemcpyHostToDevice); hipEventRecord(start); printf("\nPerforming integer operation on %d elements . . .\n", N); //blocks, cores hipLaunchKernelGGL(( add_int), dim3(n_blocks), dim3(n_cores), 0, 0, N, 2, d_x, d_y, d_z); hipEventRecord(stop); hipMemcpy(y, d_y, N*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(z, d_z, N*sizeof(int), hipMemcpyDeviceToHost); hipEventSynchronize(stop); float mseconds = 0; hipEventElapsedTime(&mseconds, start, stop); int maxError_y = 0; int maxError_z = 0; for (int i = 0; i < N; i++) { maxError_y = max(maxError_y, abs(y[i]-17)); maxError_z = max(maxError_z, abs(z[i]-13)); } hipFree( x ); hipFree( y ); hipFree( z ); // printf("\nMax error y: %fn", maxError_y); // printf("\nMax error z: %fn", maxError_z); printf("\nTime elapsed: %f", mseconds); return mseconds; } float speed_test_float(int n_blocks, int n_cores){ float *x, *y, *z, *d_x, *d_y, *d_z; x = (float*)malloc(N*sizeof(float)); y = (float*)malloc(N*sizeof(float)); z = (float*)malloc(N*sizeof(float)); hipMalloc(&d_x, N*sizeof(float)); hipMalloc(&d_y, N*sizeof(float)); hipMalloc(&d_z, N*sizeof(float)); for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; z[i] = 3.0f; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_z, z, N*sizeof(float), hipMemcpyHostToDevice); hipEventRecord(start); printf("\nPerforming float operation on %d elements . . 
.\n", N); //blocks, cores hipLaunchKernelGGL(( add_float), dim3(n_blocks), dim3(n_cores), 0, 0, N, 2.0f, d_x, d_y, d_z); hipEventRecord(stop); hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(z, d_z, N*sizeof(float), hipMemcpyDeviceToHost); hipEventSynchronize(stop); float mseconds = 0; hipEventElapsedTime(&mseconds, start, stop); hipFree( x ); hipFree( y ); hipFree( z ); float maxError_y = 0.0f; float maxError_z = 0.0f; for (int i = 0; i < N; i++) { maxError_y = max(maxError_y, abs(y[i]-17.0f)); maxError_z = max(maxError_z, abs(z[i]-13.0f)); } // printf("\nMax error y: %fn", maxError_y); // printf("\nMax error z: %fn", maxError_z); printf("\nTime elapsed: %f", mseconds); return mseconds; } double bandwidth_test(int size){ int n = size; char *x, *d_x, *d_y; x = (char*)malloc(n*sizeof(char)); hipMalloc(&d_x, n*sizeof(char)); hipMalloc(&d_y, n*sizeof(char)); for (int i = 0; i < n; i++) { x[i] = 'a'; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipMemcpy(d_x, x, n*sizeof(char), hipMemcpyHostToDevice); hipEventRecord(start); printf("\nRuning memory bandwidth test with %d bytes . . .\n", n); hipEventRecord(stop); hipMemcpy(x, d_x, n*sizeof(char), hipMemcpyDeviceToHost); hipEventSynchronize(stop); float mseconds = 0; hipEventElapsedTime(&mseconds, start, stop); hipFree( x ); printf("\nTime elapsed: %fms", mseconds); double bandwidth = (n / (mseconds/1000))*1e-9; printf("\nBandwidth (GB/s): %f\n", bandwidth); return bandwidth; } int ConvertSMVer2Cores(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine the # of cores per SM typedef struct { int SM; // 0xMm (hexidecimal notation), M = SM Major version, and m = SM minor version int Cores; } sSMtoCores; sSMtoCores nGpuArchCoresPerSM[] = { { 0x10, 8 }, // Tesla Generation (SM 1.0) G80 class { 0x11, 8 }, // Tesla Generation (SM 1.1) G8x class { 0x12, 8 }, // Tesla Generation (SM 1.2) G9x class { 0x13, 8 }, // Tesla Generation (SM 1.3) GT200 class { 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class { 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class { 0x30, 192}, // Fermi Generation (SM 3.0) GK10x class { -1, -1 } }; int index = 0; while (nGpuArchCoresPerSM[index].SM != -1) { if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor) ) { return nGpuArchCoresPerSM[index].Cores; } index++; } printf("MapSMtoCores SM %d.%d is undefined (please update to the latest SDK)!\n", major, minor); return -1; } TestResult gpu_test() { TestResult result; int dev =0, n_blocks = 2, n_cores = 384; //blocks per grid and threads per block //Getting number of GPU cores hipSetDevice(0); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); n_blocks = deviceProp.multiProcessorCount; n_cores = ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount; printf("\n (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n", n_blocks, ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),n_cores); //Running tests n_blocks = ceil(N+n_cores)/n_cores; printf("\n----- Runing GPU speed test - integer operations -----\n"); result.time_int = speed_test_int(n_blocks, n_cores); printf("\n----- Runing GPU speed test - float operations -----\n"); result.time_float = speed_test_float((N+511)/n_cores, n_cores); printf("\n----- Runing memory tests -----\n"); result.bandwidth[0] = bandwidth_test(1); result.bandwidth[1] = bandwidth_test(1000); result.bandwidth[2] = bandwidth_test(1000000); //bandwidth test return result; }
110567560f38745d4df799e105c75e5458b3e334.cu
#include "gpuops.h" #include <stdio.h> #include <time.h> #include <sys/time.h> #include <sys/resource.h> #define INT_M 2 const int N = 20 * (1 << 20); double get_time() { struct timeval t; struct timezone tzp; gettimeofday(&t, &tzp); return t.tv_sec + t.tv_usec*1e-6; } //float operations __global__ void add_float(int n, float a, float *x, float *y, float *z) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { y[i] = a*x[i] + y[i];//y=4 z[i] = a*x[i] + z[i];//z=5 z[i] = a*y[i] + z[i];//z=13 y[i] = a*x[i] + z[i];//y=15 y[i] = a*x[i] + y[i];//y=17 } } //int operations __global__ void add_int(int n, int a, int *x, int *y, int *z) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { y[i] = a*x[i] + y[i];//y=4 z[i] = a*x[i] + z[i];//z=5 z[i] = a*y[i] + z[i];//z=13 y[i] = a*x[i] + z[i];//y=15 y[i] = a*x[i] + y[i];//y=17 } } float speed_test_int(int n_blocks, int n_cores){ int *x, *y, *z, *d_x, *d_y, *d_z; x = (int*)malloc(N*sizeof(int)); y = (int*)malloc(N*sizeof(int)); z = (int*)malloc(N*sizeof(int)); cudaMalloc(&d_x, N*sizeof(int)); cudaMalloc(&d_y, N*sizeof(int)); cudaMalloc(&d_z, N*sizeof(int)); for (int i = 0; i < N; i++) { x[i] = 1; y[i] = 2; z[i] = 3; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaMemcpy(d_x, x, N*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_y, y, N*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_z, z, N*sizeof(int), cudaMemcpyHostToDevice); cudaEventRecord(start); printf("\nPerforming integer operation on %d elements . . .\n", N); //blocks, cores add_int<<<n_blocks, n_cores>>>(N, 2, d_x, d_y, d_z); cudaEventRecord(stop); cudaMemcpy(y, d_y, N*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(z, d_z, N*sizeof(int), cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); float mseconds = 0; cudaEventElapsedTime(&mseconds, start, stop); int maxError_y = 0; int maxError_z = 0; for (int i = 0; i < N; i++) { maxError_y = max(maxError_y, abs(y[i]-17)); maxError_z = max(maxError_z, abs(z[i]-13)); } cudaFree( x ); cudaFree( y ); cudaFree( z ); // printf("\nMax error y: %fn", maxError_y); // printf("\nMax error z: %fn", maxError_z); printf("\nTime elapsed: %f", mseconds); return mseconds; } float speed_test_float(int n_blocks, int n_cores){ float *x, *y, *z, *d_x, *d_y, *d_z; x = (float*)malloc(N*sizeof(float)); y = (float*)malloc(N*sizeof(float)); z = (float*)malloc(N*sizeof(float)); cudaMalloc(&d_x, N*sizeof(float)); cudaMalloc(&d_y, N*sizeof(float)); cudaMalloc(&d_z, N*sizeof(float)); for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; z[i] = 3.0f; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_z, z, N*sizeof(float), cudaMemcpyHostToDevice); cudaEventRecord(start); printf("\nPerforming float operation on %d elements . . 
.\n", N); //blocks, cores add_float<<<n_blocks, n_cores>>>(N, 2.0f, d_x, d_y, d_z); cudaEventRecord(stop); cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(z, d_z, N*sizeof(float), cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); float mseconds = 0; cudaEventElapsedTime(&mseconds, start, stop); cudaFree( x ); cudaFree( y ); cudaFree( z ); float maxError_y = 0.0f; float maxError_z = 0.0f; for (int i = 0; i < N; i++) { maxError_y = max(maxError_y, abs(y[i]-17.0f)); maxError_z = max(maxError_z, abs(z[i]-13.0f)); } // printf("\nMax error y: %fn", maxError_y); // printf("\nMax error z: %fn", maxError_z); printf("\nTime elapsed: %f", mseconds); return mseconds; } double bandwidth_test(int size){ int n = size; char *x, *d_x, *d_y; x = (char*)malloc(n*sizeof(char)); cudaMalloc(&d_x, n*sizeof(char)); cudaMalloc(&d_y, n*sizeof(char)); for (int i = 0; i < n; i++) { x[i] = 'a'; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaMemcpy(d_x, x, n*sizeof(char), cudaMemcpyHostToDevice); cudaEventRecord(start); printf("\nRuning memory bandwidth test with %d bytes . . .\n", n); cudaEventRecord(stop); cudaMemcpy(x, d_x, n*sizeof(char), cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); float mseconds = 0; cudaEventElapsedTime(&mseconds, start, stop); cudaFree( x ); printf("\nTime elapsed: %fms", mseconds); double bandwidth = (n / (mseconds/1000))*1e-9; printf("\nBandwidth (GB/s): %f\n", bandwidth); return bandwidth; } int ConvertSMVer2Cores(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine the # of cores per SM typedef struct { int SM; // 0xMm (hexidecimal notation), M = SM Major version, and m = SM minor version int Cores; } sSMtoCores; sSMtoCores nGpuArchCoresPerSM[] = { { 0x10, 8 }, // Tesla Generation (SM 1.0) G80 class { 0x11, 8 }, // Tesla Generation (SM 1.1) G8x class { 0x12, 8 }, // Tesla Generation (SM 1.2) G9x class { 0x13, 8 }, // Tesla Generation (SM 1.3) GT200 class { 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class { 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class { 0x30, 192}, // Fermi Generation (SM 3.0) GK10x class { -1, -1 } }; int index = 0; while (nGpuArchCoresPerSM[index].SM != -1) { if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor) ) { return nGpuArchCoresPerSM[index].Cores; } index++; } printf("MapSMtoCores SM %d.%d is undefined (please update to the latest SDK)!\n", major, minor); return -1; } TestResult gpu_test() { TestResult result; int dev =0, n_blocks = 2, n_cores = 384; //blocks per grid and threads per block //Getting number of GPU cores cudaSetDevice(0); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); n_blocks = deviceProp.multiProcessorCount; n_cores = ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount; printf("\n (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n", n_blocks, ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),n_cores); //Running tests n_blocks = ceil(N+n_cores)/n_cores; printf("\n----- Runing GPU speed test - integer operations -----\n"); result.time_int = speed_test_int(n_blocks, n_cores); printf("\n----- Runing GPU speed test - float operations -----\n"); result.time_float = speed_test_float((N+511)/n_cores, n_cores); printf("\n----- Runing memory tests -----\n"); result.bandwidth[0] = bandwidth_test(1); result.bandwidth[1] = bandwidth_test(1000); result.bandwidth[2] = bandwidth_test(1000000); //bandwidth test return result; }
f06ecb44b86822c82ab41a83bcdd3174eb87121e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. //cudaStatus = hipSetDevice(0); //if (cudaStatus != hipSuccess) { // fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); // goto Error; //} // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
f06ecb44b86822c82ab41a83bcdd3174eb87121e.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. //cudaStatus = cudaSetDevice(0); //if (cudaStatus != cudaSuccess) { // fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); // goto Error; //} // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
c12ebcff698933f368dd153d74637f615bdc3f1e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.h" #include "common.h" template <typename Dtype> __global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data, const Dtype* bottom_mask, const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { //index here indices the input pixels int c = (index / iwidth / iheight) % channels; int n = index / iwidth / iheight / channels; top_data += (n*channels + c)*oheight*owidth; int maxind = bottom_mask[index]-1; top_data[maxind] = bottom_data[index]; } } template <typename Dtype> __global__ void MaxUnpoolBackward(const int nthreads, const Dtype* top_diff, const Dtype* bottom_mask, const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int c = (index / iwidth / iheight) % channels; int n = index / iwidth / iheight / channels; top_diff += (n*channels + c)*oheight*owidth; int maxind = bottom_mask[index]-1; bottom_diff[index] = top_diff[maxind]; } } static int cunn_SpatialMaxUnpooling_updateOutput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); int owidth = luaT_getfieldcheckint(L, 1, "owidth"); int oheight = luaT_getfieldcheckint(L, 1, "oheight"); THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); THCudaTensor *indices = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "indices", "torch.CudaTensor"); THAssert(THCudaTensor_checkGPU(state, 3, input, output, indices)); luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected"); long nInputCols, nInputRows, nInputPlane, batchSize; if (input->nDimension == 3) { nInputCols = input->size[2]; nInputRows = input->size[1]; nInputPlane = input->size[0]; batchSize = 1; } else { nInputCols = input->size[3]; nInputRows = input->size[2]; nInputPlane = input->size[1]; batchSize = input->size[0]; } input = THCudaTensor_newContiguous(state, input); indices = THCudaTensor_newContiguous(state, indices); THCudaTensor_resize4d(state, output, batchSize, nInputPlane, oheight, owidth); THCudaTensor_zero(state, output); int count = THCudaTensor_nElement(state, input); hipLaunchKernelGGL(( MaxUnpoolForward) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) , count, THCudaTensor_data(state, input), THCudaTensor_data(state, indices), batchSize, nInputPlane, nInputRows, nInputCols, oheight, owidth, THCudaTensor_data(state, output)); if(input->nDimension == 3) THCudaTensor_resize3d(state, output, nInputPlane, oheight, owidth); THCudaTensor_free(state, input); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in SpatialMaxUnpooling.updateOutput: %s\n", hipGetErrorString(err)); THError("aborting"); } return 1; } static int cunn_SpatialMaxUnpooling_updateGradInput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); int owidth = luaT_getfieldcheckint(L, 1, "owidth"); int oheight = luaT_getfieldcheckint(L, 1, "oheight"); THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); 
THCudaTensor *indices = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "indices", "torch.CudaTensor"); THAssert(THCudaTensor_checkGPU(state, 4, input, gradOutput, indices, gradInput)); long nInputCols, nInputRows, nInputPlane, batchSize; if (input->nDimension == 3) { nInputCols = input->size[2]; nInputRows = input->size[1]; nInputPlane = input->size[0]; batchSize = 1; } else { nInputCols = input->size[3]; nInputRows = input->size[2]; nInputPlane = input->size[1]; batchSize = input->size[0]; } input = THCudaTensor_newContiguous(state, input); indices = THCudaTensor_newContiguous(state, indices); gradOutput = THCudaTensor_newContiguous(state, gradOutput); THCudaTensor_resizeAs(state, gradInput, input); int count = THCudaTensor_nElement(state, input); hipLaunchKernelGGL(( MaxUnpoolBackward) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) , count, THCudaTensor_data(state, gradOutput), THCudaTensor_data(state, indices), batchSize, nInputPlane, nInputRows, nInputCols, oheight, owidth, THCudaTensor_data(state, gradInput)); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in SpatialMaxUnpooling.updateGradInput: %s\n", hipGetErrorString(err)); THError("aborting"); } // clean THCudaTensor_free(state, input); THCudaTensor_free(state, gradOutput); return 1; } static const struct luaL_Reg cunn_SpatialMaxUnpooling__ [] = { {"SpatialMaxUnpooling_updateOutput", cunn_SpatialMaxUnpooling_updateOutput}, {"SpatialMaxUnpooling_updateGradInput", cunn_SpatialMaxUnpooling_updateGradInput}, {NULL, NULL} }; void cunn_SpatialMaxUnpooling_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, cunn_SpatialMaxUnpooling__, "nn"); lua_pop(L,1); }
c12ebcff698933f368dd153d74637f615bdc3f1e.cu
#include "utils.h" #include "common.h" template <typename Dtype> __global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data, const Dtype* bottom_mask, const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { //index here indices the input pixels int c = (index / iwidth / iheight) % channels; int n = index / iwidth / iheight / channels; top_data += (n*channels + c)*oheight*owidth; int maxind = bottom_mask[index]-1; top_data[maxind] = bottom_data[index]; } } template <typename Dtype> __global__ void MaxUnpoolBackward(const int nthreads, const Dtype* top_diff, const Dtype* bottom_mask, const int num, const int channels, const int iheight, const int iwidth, const int oheight, const int owidth, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int c = (index / iwidth / iheight) % channels; int n = index / iwidth / iheight / channels; top_diff += (n*channels + c)*oheight*owidth; int maxind = bottom_mask[index]-1; bottom_diff[index] = top_diff[maxind]; } } static int cunn_SpatialMaxUnpooling_updateOutput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); int owidth = luaT_getfieldcheckint(L, 1, "owidth"); int oheight = luaT_getfieldcheckint(L, 1, "oheight"); THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); THCudaTensor *indices = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "indices", "torch.CudaTensor"); THAssert(THCudaTensor_checkGPU(state, 3, input, output, indices)); luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected"); long nInputCols, nInputRows, nInputPlane, batchSize; if (input->nDimension == 3) { nInputCols = input->size[2]; nInputRows = input->size[1]; nInputPlane = input->size[0]; batchSize = 1; } else { nInputCols = input->size[3]; nInputRows = input->size[2]; nInputPlane = input->size[1]; batchSize = input->size[0]; } input = THCudaTensor_newContiguous(state, input); indices = THCudaTensor_newContiguous(state, indices); THCudaTensor_resize4d(state, output, batchSize, nInputPlane, oheight, owidth); THCudaTensor_zero(state, output); int count = THCudaTensor_nElement(state, input); MaxUnpoolForward <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>> (count, THCudaTensor_data(state, input), THCudaTensor_data(state, indices), batchSize, nInputPlane, nInputRows, nInputCols, oheight, owidth, THCudaTensor_data(state, output)); if(input->nDimension == 3) THCudaTensor_resize3d(state, output, nInputPlane, oheight, owidth); THCudaTensor_free(state, input); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in SpatialMaxUnpooling.updateOutput: %s\n", cudaGetErrorString(err)); THError("aborting"); } return 1; } static int cunn_SpatialMaxUnpooling_updateGradInput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); int owidth = luaT_getfieldcheckint(L, 1, "owidth"); int oheight = luaT_getfieldcheckint(L, 1, "oheight"); THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); THCudaTensor *indices = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "indices", "torch.CudaTensor"); 
THAssert(THCudaTensor_checkGPU(state, 4, input, gradOutput, indices, gradInput)); long nInputCols, nInputRows, nInputPlane, batchSize; if (input->nDimension == 3) { nInputCols = input->size[2]; nInputRows = input->size[1]; nInputPlane = input->size[0]; batchSize = 1; } else { nInputCols = input->size[3]; nInputRows = input->size[2]; nInputPlane = input->size[1]; batchSize = input->size[0]; } input = THCudaTensor_newContiguous(state, input); indices = THCudaTensor_newContiguous(state, indices); gradOutput = THCudaTensor_newContiguous(state, gradOutput); THCudaTensor_resizeAs(state, gradInput, input); int count = THCudaTensor_nElement(state, input); MaxUnpoolBackward <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>> (count, THCudaTensor_data(state, gradOutput), THCudaTensor_data(state, indices), batchSize, nInputPlane, nInputRows, nInputCols, oheight, owidth, THCudaTensor_data(state, gradInput)); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in SpatialMaxUnpooling.updateGradInput: %s\n", cudaGetErrorString(err)); THError("aborting"); } // clean THCudaTensor_free(state, input); THCudaTensor_free(state, gradOutput); return 1; } static const struct luaL_Reg cunn_SpatialMaxUnpooling__ [] = { {"SpatialMaxUnpooling_updateOutput", cunn_SpatialMaxUnpooling_updateOutput}, {"SpatialMaxUnpooling_updateGradInput", cunn_SpatialMaxUnpooling_updateGradInput}, {NULL, NULL} }; void cunn_SpatialMaxUnpooling_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, cunn_SpatialMaxUnpooling__, "nn"); lua_pop(L,1); }
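The unpooling kernels above depend on CUDA_KERNEL_LOOP, GET_BLOCKS and CUDA_NUM_THREADS from "common.h", which is not part of this dump. The sketch below shows the typical Caffe/Torch-style definitions of those helpers; treat it as an assumption about that header, not its verbatim contents.

// Threads per block used by GET_BLOCKS; cunn-style code commonly uses 1024 (older Caffe used 512).
const int CUDA_NUM_THREADS = 1024;

// Enough blocks so that blocks * threads >= n.
inline int GET_BLOCKS(const int n)
{
    return (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}

// Grid-stride loop: a thread handles i, then i + blockDim*gridDim, and so on,
// so a fixed-size grid can cover any n.
#define CUDA_KERNEL_LOOP(i, n)                                  \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;         \
         i < (n);                                               \
         i += blockDim.x * gridDim.x)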
04db75c6f193efc2a0758883484827497b3e8e29.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>

// add() executes on the device and is called from the host.
// Because it runs on the device, a, b and c must point to device memory,
// so we allocate those buffers on the GPU.
__global__ void add(int *a, int *b, int *c)
{
    c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}

int main(void)
{
    // host copies of a, b, c
    int a[] = { 1, 2, 3 };
    int b[] = { 4, 5, 6 };
    int c[] = { 0, 0, 0 };

    // device copies of a, b, c
    int *d_a, *d_b, *d_c;
    int size = sizeof(int) * 3;

    // allocate space for device copies of a, b, c
    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    hipMalloc((void **)&d_c, size);

    // copy inputs to device
    hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);

    // launch add() on the GPU with one block of three threads (one per element)
    hipLaunchKernelGGL(add, dim3(1), dim3(3), 0, 0, d_a, d_b, d_c);

    // copy result back to host
    hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);

    // don't forget to free the device memory
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    for (size_t i = 0; i < 3; i++) {
        printf("Val is : %d \n", c[i]);
    }

    // check error
    printf("CUDA error: %s\n", hipGetErrorString(hipGetLastError()));

    return 0;
}
04db75c6f193efc2a0758883484827497b3e8e29.cu
#include <stdio.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

// add() executes on the device and is called from the host.
// Because it runs on the device, a, b and c must point to device memory,
// so we allocate those buffers on the GPU.
__global__ void add(int *a, int *b, int *c)
{
    c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}

int main(void)
{
    // host copies of a, b, c
    int a[] = { 1, 2, 3 };
    int b[] = { 4, 5, 6 };
    int c[] = { 0, 0, 0 };

    // device copies of a, b, c
    int *d_a, *d_b, *d_c;
    int size = sizeof(int) * 3;

    // allocate space for device copies of a, b, c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // copy inputs to device
    cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);

    // launch add() on the GPU with one block of three threads (one per element)
    add<<<1, 3>>>(d_a, d_b, d_c);

    // copy result back to host
    cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);

    // don't forget to free the device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    for (size_t i = 0; i < 3; i++) {
        printf("Val is : %d \n", c[i]);
    }

    // check error
    printf("CUDA error: %s\n", cudaGetErrorString(cudaGetLastError()));

    return 0;
}
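The add kernel in the pair above indexes with threadIdx.x alone, which is only correct while the whole problem fits in the single block launched here (one block of three threads). A hedged sketch of the multi-block-safe variant (add_n is a hypothetical name), including the bounds check it then needs:

__global__ void add_n(int n, const int *a, const int *b, int *c)
{
    // Global index: valid for any <<<grid, block>>> shape, not just one block.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {          // the last block may run past the end of the arrays
        c[i] = a[i] + b[i];
    }
}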
9d0859e7fc2e6d2fb115074e534334ed623c1e7e.hip
// !!! This is a file automatically generated by hipify!!! #include <cstring> #include <stdexcept> #include <cstdlib> #include <fstream> #include <iostream> #include <vector> #include <algorithm> #include "utils.h" #include "cuda_error_check.cuh" #include "initial_graph.hpp" #include "parse_graph.hpp" #include "opt.cu" #include "impl2.cu" #include "impl1.cu" #define SSSP_INF 1073741824 enum class ProcessingType {Push, Neighbor, Own, Unknown}; enum SyncMode {InCore, OutOfCore}; enum SyncMode syncMethod; enum SmemMode {UseSmem, UseNoSmem}; enum SmemMode smemMethod; enum EdgeListMode {input=0,source=1,destination=2}; enum EdgeListMode edge_list_mode; // Open files safely. template <typename T_file> void openFileToAccess( T_file& input_file, std::string file_name ) { input_file.open( file_name.c_str() ); if( !input_file ) throw std::runtime_error( "Failed to open specified file: " + file_name + "\n" ); } int source_order_compare( edge_list a , edge_list b ) { return a.srcIndex < b.srcIndex; } int destination_order_compare( edge_list a , edge_list b ) { return a.destIndex < b.destIndex; } // Execution entry point. int main( int argc, char** argv ) { std::string usage = "\tRequired command line arguments:\n\ Input file: E.g., --input in.txt\n\ Block size: E.g., --bsize 512\n\ Block count: E.g., --bcount 192\n\ Output path: E.g., --output output.txt\n\ Processing method: E.g., --method bmf (bellman-ford), or tpe (to-process-edge), or opt (one further optimizations)\n\ Shared memory usage: E.g., --usesmem yes, or no \n\ Sync method: E.g., --sync incore, or outcore\n\ Edge List Order Mode : E.g., --edgelist input,source,destination\n"; try { std::ifstream inputFile; std::ofstream outputFile; int selectedDevice = 0; int bsize = 0, bcount = 0; int vwsize = 32; int threads = 1; long long arbparam = 0; bool nonDirectedGraph = false; // By default, the graph is directed. ProcessingType processingMethod = ProcessingType::Unknown; syncMethod = OutOfCore; /******************************** * GETTING INPUT PARAMETERS. 
********************************/ for( int iii = 1; iii < argc; ++iii ) if ( !strcmp(argv[iii], "--method") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "bmf") ) processingMethod = ProcessingType::Push; else if ( !strcmp(argv[iii+1], "tpe") ) processingMethod = ProcessingType::Neighbor; else if ( !strcmp(argv[iii+1], "opt") ) processingMethod = ProcessingType::Own; else{ std::cerr << "\n Un-recognized method parameter value \n\n"; exit; } } else if ( !strcmp(argv[iii], "--sync") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "incore") ) syncMethod = InCore; else if ( !strcmp(argv[iii+1], "outcore") ) syncMethod = OutOfCore; else{ std::cerr << "\n Un-recognized sync parameter value \n\n"; exit; } } else if ( !strcmp(argv[iii], "--usesmem") && iii != argc-1 ) { printf("The value of the argv[iii+1] is %s\n",argv[iii+1]); if ( !strcmp(argv[iii+1], "yes") ) smemMethod = UseSmem; else if (!strcmp(argv[iii+1], "no") ) smemMethod = UseNoSmem; else{ std::cerr << "\n Un-recognized usesmem parameter value \n\n"; exit; } } else if ( !strcmp(argv[iii], "--edgelist") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "input") ) edge_list_mode = input; else if ( !strcmp(argv[iii+1], "source") ) edge_list_mode = source; else if ( !strcmp(argv[iii+1], "destination") ) edge_list_mode = destination; else{ std::cerr << "\n Un-recognized usesmem parameter value \n\n"; exit; } } else if( !strcmp( argv[iii], "--input" ) && iii != argc-1 /*is not the last one*/) openFileToAccess< std::ifstream >( inputFile, std::string( argv[iii+1] ) ); else if( !strcmp( argv[iii], "--output" ) && iii != argc-1 /*is not the last one*/) openFileToAccess< std::ofstream >( outputFile, std::string( argv[iii+1] ) ); else if( !strcmp( argv[iii], "--bsize" ) && iii != argc-1 /*is not the last one*/) bsize = std::atoi( argv[iii+1] ); else if( !strcmp( argv[iii], "--bcount" ) && iii != argc-1 /*is not the last one*/) bcount = std::atoi( argv[iii+1] ); if(bsize <= 0 || bcount <= 0){ std::cerr << "Usage: " << usage; exit; throw std::runtime_error("\nAn initialization error happened.\nExiting."); } if( !inputFile.is_open() || processingMethod == ProcessingType::Unknown ) { std::cerr << "Usage: " << usage; throw std::runtime_error( "\nAn initialization error happened.\nExiting." ); } if( !outputFile.is_open() ) openFileToAccess< std::ofstream >( outputFile, "out.txt" ); CUDAErrorCheck( hipSetDevice( selectedDevice ) ); std::cout << "Device with ID " << selectedDevice << " is selected to process the graph.\n"; std::cout << "Executing entry_point.cu \n"; /******************************** * Read the input graph file. ********************************/ std::cout << "Collecting the input graph ...\n"; std::vector<initial_vertex> parsedGraph( 0 ); std::vector<edge_list> edgeList( 0 ); uint nEdges = parse_graph::parse(inputFile,parsedGraph,edgeList,arbparam,nonDirectedGraph); std::cout << "Input graph collected with " << parsedGraph.size() << " vertices and " << nEdges << " edges.\n"; /******************************** * Sort the edge list. 
********************************/ switch(edge_list_mode) { case 0: break; case 1: std::sort(edgeList.begin(),edgeList.end(),source_order_compare); break; case 2: std::sort(edgeList.begin(),edgeList.end(),destination_order_compare); break; } /* for( int i = 0 ; i < ::min(nEdges,(uint)20) ; i++ ) { std::cout << "Edge i : " << i << " ( " << edgeList[i].srcIndex << " , " << edgeList[i].destIndex << ") , weight : " << edgeList[i].weight << " \n"; }*/ std::cout<<"Arranging for the Bellman-Ford\n"; uint vertices = parsedGraph.size(); uint edges = nEdges; uint distance[vertices]; setTime(); std::cout<<"The number of vertices are:"<< vertices<<std::endl; std::fill_n(distance,vertices,SSSP_INF); int start_vertex = 0; distance[start_vertex] = 0; std::cout<<"Starting the bellman-ford\n"; for(uint i = 0 ; i < vertices ; i++ ) { bool change = false; for(uint j = 0 ; j < edges ; j++ ){ int source = edgeList[j].srcIndex; int destination = edgeList[j].destIndex; int weight = edgeList[j].weight; if(distance[source] + weight < distance[destination]) { distance[destination] = distance[source] + weight; change = true; } } if( !change ) break; } std::cout << "Sequential Bellman-Ford Takes " << getTime() << "ms.\n"; /* Process the graph. ********************************/ switch(processingMethod){ case ProcessingType::Push: puller(&parsedGraph, bsize, bcount, &edgeList,syncMethod,smemMethod,edge_list_mode); break; case ProcessingType::Neighbor: neighborHandler(&parsedGraph, bsize, bcount, &edgeList,syncMethod,smemMethod,edge_list_mode); //(std::vector<initial_vertex> * peeps, int blockSize, int blockNum,std::vector<edge_list> *edgeList,int syncMethod, int smemMethod,int edge_list_mode); break; default: own(&parsedGraph, bsize, bcount); } /******************************** * Do the comparision b/w parallel and sequential ********************************/ int num_of_diff = 0; for(int i = 0 ; i < vertices ; i++ ) { if( parsedGraph[i].vertexValue.distance != distance[i]) { //std::cout << "Sequential Distance for v " << i << " is : " << distance[i] << " and parallel gave : " << parsedGraph[i].vertexValue.distance << " \n."; num_of_diff++; } } if( num_of_diff == 0 ) { std::cout << "Good Job! Serial and Parallel versions match.\n"; } else { std::cout << "Warning!!! Serial and Parallel mismatch " << num_of_diff << " distance values.\n"; } /******************************** * It's done here. ********************************/ CUDAErrorCheck( hipDeviceReset() ); std::cout << "Done.\n"; return( EXIT_SUCCESS ); } catch( const std::exception& strException ) { std::cerr << strException.what() << "\n"; return( EXIT_FAILURE ); } catch(...) { std::cerr << "An exception has occurred." << std::endl; return( EXIT_FAILURE ); } }
9d0859e7fc2e6d2fb115074e534334ed623c1e7e.cu
#include <cstring> #include <stdexcept> #include <cstdlib> #include <fstream> #include <iostream> #include <vector> #include <algorithm> #include "utils.h" #include "cuda_error_check.cuh" #include "initial_graph.hpp" #include "parse_graph.hpp" #include "opt.cu" #include "impl2.cu" #include "impl1.cu" #define SSSP_INF 1073741824 enum class ProcessingType {Push, Neighbor, Own, Unknown}; enum SyncMode {InCore, OutOfCore}; enum SyncMode syncMethod; enum SmemMode {UseSmem, UseNoSmem}; enum SmemMode smemMethod; enum EdgeListMode {input=0,source=1,destination=2}; enum EdgeListMode edge_list_mode; // Open files safely. template <typename T_file> void openFileToAccess( T_file& input_file, std::string file_name ) { input_file.open( file_name.c_str() ); if( !input_file ) throw std::runtime_error( "Failed to open specified file: " + file_name + "\n" ); } int source_order_compare( edge_list a , edge_list b ) { return a.srcIndex < b.srcIndex; } int destination_order_compare( edge_list a , edge_list b ) { return a.destIndex < b.destIndex; } // Execution entry point. int main( int argc, char** argv ) { std::string usage = "\tRequired command line arguments:\n\ Input file: E.g., --input in.txt\n\ Block size: E.g., --bsize 512\n\ Block count: E.g., --bcount 192\n\ Output path: E.g., --output output.txt\n\ Processing method: E.g., --method bmf (bellman-ford), or tpe (to-process-edge), or opt (one further optimizations)\n\ Shared memory usage: E.g., --usesmem yes, or no \n\ Sync method: E.g., --sync incore, or outcore\n\ Edge List Order Mode : E.g., --edgelist input,source,destination\n"; try { std::ifstream inputFile; std::ofstream outputFile; int selectedDevice = 0; int bsize = 0, bcount = 0; int vwsize = 32; int threads = 1; long long arbparam = 0; bool nonDirectedGraph = false; // By default, the graph is directed. ProcessingType processingMethod = ProcessingType::Unknown; syncMethod = OutOfCore; /******************************** * GETTING INPUT PARAMETERS. 
********************************/ for( int iii = 1; iii < argc; ++iii ) if ( !strcmp(argv[iii], "--method") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "bmf") ) processingMethod = ProcessingType::Push; else if ( !strcmp(argv[iii+1], "tpe") ) processingMethod = ProcessingType::Neighbor; else if ( !strcmp(argv[iii+1], "opt") ) processingMethod = ProcessingType::Own; else{ std::cerr << "\n Un-recognized method parameter value \n\n"; exit; } } else if ( !strcmp(argv[iii], "--sync") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "incore") ) syncMethod = InCore; else if ( !strcmp(argv[iii+1], "outcore") ) syncMethod = OutOfCore; else{ std::cerr << "\n Un-recognized sync parameter value \n\n"; exit; } } else if ( !strcmp(argv[iii], "--usesmem") && iii != argc-1 ) { printf("The value of the argv[iii+1] is %s\n",argv[iii+1]); if ( !strcmp(argv[iii+1], "yes") ) smemMethod = UseSmem; else if (!strcmp(argv[iii+1], "no") ) smemMethod = UseNoSmem; else{ std::cerr << "\n Un-recognized usesmem parameter value \n\n"; exit; } } else if ( !strcmp(argv[iii], "--edgelist") && iii != argc-1 ) { if ( !strcmp(argv[iii+1], "input") ) edge_list_mode = input; else if ( !strcmp(argv[iii+1], "source") ) edge_list_mode = source; else if ( !strcmp(argv[iii+1], "destination") ) edge_list_mode = destination; else{ std::cerr << "\n Un-recognized usesmem parameter value \n\n"; exit; } } else if( !strcmp( argv[iii], "--input" ) && iii != argc-1 /*is not the last one*/) openFileToAccess< std::ifstream >( inputFile, std::string( argv[iii+1] ) ); else if( !strcmp( argv[iii], "--output" ) && iii != argc-1 /*is not the last one*/) openFileToAccess< std::ofstream >( outputFile, std::string( argv[iii+1] ) ); else if( !strcmp( argv[iii], "--bsize" ) && iii != argc-1 /*is not the last one*/) bsize = std::atoi( argv[iii+1] ); else if( !strcmp( argv[iii], "--bcount" ) && iii != argc-1 /*is not the last one*/) bcount = std::atoi( argv[iii+1] ); if(bsize <= 0 || bcount <= 0){ std::cerr << "Usage: " << usage; exit; throw std::runtime_error("\nAn initialization error happened.\nExiting."); } if( !inputFile.is_open() || processingMethod == ProcessingType::Unknown ) { std::cerr << "Usage: " << usage; throw std::runtime_error( "\nAn initialization error happened.\nExiting." ); } if( !outputFile.is_open() ) openFileToAccess< std::ofstream >( outputFile, "out.txt" ); CUDAErrorCheck( cudaSetDevice( selectedDevice ) ); std::cout << "Device with ID " << selectedDevice << " is selected to process the graph.\n"; std::cout << "Executing entry_point.cu \n"; /******************************** * Read the input graph file. ********************************/ std::cout << "Collecting the input graph ...\n"; std::vector<initial_vertex> parsedGraph( 0 ); std::vector<edge_list> edgeList( 0 ); uint nEdges = parse_graph::parse(inputFile,parsedGraph,edgeList,arbparam,nonDirectedGraph); std::cout << "Input graph collected with " << parsedGraph.size() << " vertices and " << nEdges << " edges.\n"; /******************************** * Sort the edge list. 
********************************/ switch(edge_list_mode) { case 0: break; case 1: std::sort(edgeList.begin(),edgeList.end(),source_order_compare); break; case 2: std::sort(edgeList.begin(),edgeList.end(),destination_order_compare); break; } /* for( int i = 0 ; i < std::min(nEdges,(uint)20) ; i++ ) { std::cout << "Edge i : " << i << " ( " << edgeList[i].srcIndex << " , " << edgeList[i].destIndex << ") , weight : " << edgeList[i].weight << " \n"; }*/ std::cout<<"Arranging for the Bellman-Ford\n"; uint vertices = parsedGraph.size(); uint edges = nEdges; uint distance[vertices]; setTime(); std::cout<<"The number of vertices are:"<< vertices<<std::endl; std::fill_n(distance,vertices,SSSP_INF); int start_vertex = 0; distance[start_vertex] = 0; std::cout<<"Starting the bellman-ford\n"; for(uint i = 0 ; i < vertices ; i++ ) { bool change = false; for(uint j = 0 ; j < edges ; j++ ){ int source = edgeList[j].srcIndex; int destination = edgeList[j].destIndex; int weight = edgeList[j].weight; if(distance[source] + weight < distance[destination]) { distance[destination] = distance[source] + weight; change = true; } } if( !change ) break; } std::cout << "Sequential Bellman-Ford Takes " << getTime() << "ms.\n"; /* Process the graph. ********************************/ switch(processingMethod){ case ProcessingType::Push: puller(&parsedGraph, bsize, bcount, &edgeList,syncMethod,smemMethod,edge_list_mode); break; case ProcessingType::Neighbor: neighborHandler(&parsedGraph, bsize, bcount, &edgeList,syncMethod,smemMethod,edge_list_mode); //(std::vector<initial_vertex> * peeps, int blockSize, int blockNum,std::vector<edge_list> *edgeList,int syncMethod, int smemMethod,int edge_list_mode); break; default: own(&parsedGraph, bsize, bcount); } /******************************** * Do the comparision b/w parallel and sequential ********************************/ int num_of_diff = 0; for(int i = 0 ; i < vertices ; i++ ) { if( parsedGraph[i].vertexValue.distance != distance[i]) { //std::cout << "Sequential Distance for v " << i << " is : " << distance[i] << " and parallel gave : " << parsedGraph[i].vertexValue.distance << " \n."; num_of_diff++; } } if( num_of_diff == 0 ) { std::cout << "Good Job! Serial and Parallel versions match.\n"; } else { std::cout << "Warning!!! Serial and Parallel mismatch " << num_of_diff << " distance values.\n"; } /******************************** * It's done here. ********************************/ CUDAErrorCheck( cudaDeviceReset() ); std::cout << "Done.\n"; return( EXIT_SUCCESS ); } catch( const std::exception& strException ) { std::cerr << strException.what() << "\n"; return( EXIT_FAILURE ); } catch(...) { std::cerr << "An exception has occurred." << std::endl; return( EXIT_FAILURE ); } }
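The serial reference in the entry point above is edge-list Bellman-Ford: relax every edge for at most |V| passes and stop as soon as a full pass changes nothing. Below is a condensed sketch of that loop, with a hypothetical Edge struct standing in for edge_list; note in passing that the bare "exit;" statements in the argument parsing above only name the function without calling it, so those error branches fall through instead of terminating.

#include <vector>
#include <cstdint>

struct Edge { int src, dst, w; };   // hypothetical stand-in for edge_list

// dist is pre-filled with SSSP_INF except dist[source] = 0, as in main() above.
void bellman_ford(std::vector<uint32_t> &dist, const std::vector<Edge> &edges)
{
    const size_t V = dist.size();
    for (size_t pass = 0; pass < V; ++pass) {
        bool changed = false;
        for (const Edge &e : edges) {
            if (dist[e.src] + e.w < dist[e.dst]) {   // relax edge src -> dst
                dist[e.dst] = dist[e.src] + e.w;
                changed = true;
            }
        }
        if (!changed)
            break;                                   // fixed point: no edge improved this pass
    }
}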
bcd6d79fb36e574cc7592cdce194e774d7780648.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common/kernel.h" template <unsigned nthdsPerCTA> __launch_bounds__(nthdsPerCTA) __global__ void pReLUKernel(const int n, const float negativeSlope, const float* input, float* output) { for (int i = blockIdx.x * nthdsPerCTA + threadIdx.x; i < n; i += gridDim.x * nthdsPerCTA) { output[i] = input[i] > 0 ? input[i] : input[i] * negativeSlope; } } pluginStatus_t lReLUGPU(hipStream_t stream, const int n, const float negativeSlope, const void* input, void* output) { const int BS = 512; const int GS = (n + BS - 1) / BS; hipLaunchKernelGGL(( pReLUKernel<BS>), dim3(GS), dim3(BS), 0, stream, n, negativeSlope, (const float*) input, (float*) output); return STATUS_SUCCESS; } pluginStatus_t lReLUInference( hipStream_t stream, const int n, const float negativeSlope, const void* input, void* output) { return lReLUGPU(stream, n, negativeSlope, (const float*) input, (float*) output); }
bcd6d79fb36e574cc7592cdce194e774d7780648.cu
/* * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common/kernel.h" template <unsigned nthdsPerCTA> __launch_bounds__(nthdsPerCTA) __global__ void pReLUKernel(const int n, const float negativeSlope, const float* input, float* output) { for (int i = blockIdx.x * nthdsPerCTA + threadIdx.x; i < n; i += gridDim.x * nthdsPerCTA) { output[i] = input[i] > 0 ? input[i] : input[i] * negativeSlope; } } pluginStatus_t lReLUGPU(cudaStream_t stream, const int n, const float negativeSlope, const void* input, void* output) { const int BS = 512; const int GS = (n + BS - 1) / BS; pReLUKernel<BS><<<GS, BS, 0, stream>>>(n, negativeSlope, (const float*) input, (float*) output); return STATUS_SUCCESS; } pluginStatus_t lReLUInference( cudaStream_t stream, const int n, const float negativeSlope, const void* input, void* output) { return lReLUGPU(stream, n, negativeSlope, (const float*) input, (float*) output); }
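The pReLU kernel above is declared with __launch_bounds__(nthdsPerCTA), which promises the compiler that the kernel will never be launched with more threads per block than that, so it can budget registers for the stated occupancy; the host code must then launch with a block size no larger than the bound. A minimal standalone sketch of the same pattern (scale and launch_scale are hypothetical names):

#include <cuda_runtime.h>

// The compiler may assume blockDim.x * blockDim.y * blockDim.z <= 256 here.
__launch_bounds__(256) __global__ void scale(int n, float a, float *x)
{
    for (int i = blockIdx.x * 256 + threadIdx.x; i < n; i += gridDim.x * 256)
        x[i] = a * x[i];                  // grid-stride loop, as in pReLUKernel above
}

void launch_scale(int n, float a, float *d_x, cudaStream_t stream)
{
    const int BS = 256;                   // must not exceed the declared bound
    const int GS = (n + BS - 1) / BS;
    scale<<<GS, BS, 0, stream>>>(n, a, d_x);
}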
c1b13f13e8f19dbe3d652052d1d75374aac006a8.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]) // #include <helpers/svd.h> #include <hip/hip_runtime.h> #include <rocblas.h> #include <cusolverDn.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ShapeUtils.h> namespace sd { namespace ops { namespace helpers { // FIXME -> we should optimize these helpers for the case when input matrices have c order (perform transpositions appropriately) template <typename T> __global__ static void inverseColumnSignCuda(void* vu, const Nd4jLong* uShapeInfo, void* vv, const Nd4jLong* vShapeInfo) { T* u = reinterpret_cast<T*>(vu); T* v = reinterpret_cast<T*>(vv); __shared__ int rank, uLastButOneColumn, vLastButOneColumn; // uRank = vRank __shared__ Nd4jLong uLen, vLen; __shared__ Nd4jLong *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); rank = shape::rank(uShapeInfo); uLen = shape::length(uShapeInfo); vLen = shape::length(vShapeInfo); uLastButOneColumn = uShapeInfo[rank] - 2; vLastButOneColumn = vShapeInfo[rank - 1] - 2; } __syncthreads(); const auto ind = threadIdx.x + blockIdx.x * blockDim.x; auto coords = sharedMem + threadIdx.x * rank; // u for (Nd4jLong i = ind; i < uLen; i += gridDim.x * blockDim.x) { shape::index2coords(i, uShapeInfo, coords); if(coords[rank - 1] == 0 || coords[rank - 1] == uLastButOneColumn) // do not change sign in first and last but one columns continue; const auto uOffset = shape::getOffset(uShapeInfo, coords); u[uOffset] = -u[uOffset]; } // v for (Nd4jLong i = ind; i < vLen; i += gridDim.x * blockDim.x) { shape::index2coords(i, vShapeInfo, coords); if(coords[rank - 2] == 0 || coords[rank - 2] == vLastButOneColumn) // do not change sign in first and last but one columns continue; const auto vOffset = shape::getOffset(vShapeInfo, coords); v[vOffset] = -v[vOffset]; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void inverseColumnSignCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, void* vu, const Nd4jLong* uShapeInfo, void* vv, const Nd4jLong* vShapeInfo) { hipLaunchKernelGGL(( inverseColumnSignCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vu, uShapeInfo, vv, vShapeInfo); } BUILD_SINGLE_TEMPLATE(template void inverseColumnSignCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t* stream, void* vu, const Nd4jLong* uShapeInfo, void* vv, const Nd4jLong* vShapeInfo), FLOAT_TYPES); ////////////////////////////////////////////////////////////////////////// static void svdQR(sd::LaunchContext* context, const NDArray* A, 
NDArray* S, NDArray* U, NDArray* VT, const bool fullUV, const bool calcUV) { // since cusa api hipsolverDnDgesvd/hipsolverDnSgesvd have following constrain on input matrix A: A_rows >= A_columns && A_order = 'f' // we make this function to have deal with 2 valid cases only: // 1) A_rows >= A_columns and A_corder = 'f' // 2) A_rows <= A_columns and A_corder = 'c' - int this case perform transposition to get f order // if 1) or 2) are not met then throw exception // A [m, n] // S [n] // U [m, m] or [m, n] if fullUV = false and m > n // VT [n, n] or [m, n] if fullUV = false and m < n if(A->rankOf() != 2) throw std::runtime_error("svdQR: rank of A array is not equal 2 !"); auto m = A->sizeAt(0); auto n = A->sizeAt(1); const int minDim = m < n ? m : n; const char orderA = A->ordering(); if(m < n) throw std::runtime_error("svdQR: due to cuda api input constrains given shape of A array are not valid !"); if(std::vector<Nd4jLong>({minDim}) != S->getShapeAsVector()) throw std::runtime_error("svdQR: wrong shape of S array !"); if(calcUV) { if(fullUV && std::vector<Nd4jLong>({m,m}) != U->getShapeAsVector()) throw std::runtime_error("svdQR: wrong shape of U array !"); else if(!fullUV && std::vector<Nd4jLong>({m,minDim}) != U->getShapeAsVector()) throw std::runtime_error("svdQR: wrong shape of U array !"); if(fullUV && std::vector<Nd4jLong>({n,n}) != VT->getShapeAsVector()) throw std::runtime_error("svdQR: wrong shape of VT array !"); else if(!fullUV && std::vector<Nd4jLong>({minDim,n}) != VT->getShapeAsVector()) throw std::runtime_error("svdQR: wrong shape of VT array !"); } NDArray* pA = const_cast<NDArray*>(A); NDArray* pS = S; NDArray* pU = U; NDArray* pVT = VT; std::vector<NDArray*> toDelete; if(pA->ews() != 1 || pA->ordering() == 'c') { pA = new NDArray(A->dup('f')); toDelete.push_back(pA); } if(S->ews() != 1) { pS = new NDArray(S->dup('f')); toDelete.push_back(pS); } if(calcUV) { if(pU->ews() != 1 || pU->ordering() == 'c') { pU = new NDArray(U->dup('f')); toDelete.push_back(pU); } if(pVT->ews() != 1 || pVT->ordering() == 'c') { pVT = new NDArray(VT->dup('f')); toDelete.push_back(pVT); } } std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex()); // create cusolverDn handle hipsolverDnHandle_t* handle = (hipsolverDnHandle_t*)context->getCusolverHandle(); //nullptr; //cusolverStatus_t status = hipsolverDnCreate(&handle); if(handle == nullptr) throw cuda_exception::build("svdQR: cuda failed !", -1); // stream auto status = hipsolverDnSetStream(*handle, *context->getCudaStream()); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdQR: cuda failed !", status); // query working space of SVD int lwork = 0; if(A->dataType() == DataType::DOUBLE) status = hipsolverDnDgesvd_bufferSize(*handle, m, n, &lwork); else if(A->dataType() == DataType::FLOAT32) status = hipsolverDnSgesvd_bufferSize(*handle, m, n, &lwork); else throw std::invalid_argument("svdQR: given data type is unsupported !"); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdQR: cuda failed !", status); // allocate memory for dWork void* dWork = nullptr; hipError_t status2 = hipMalloc((void**)&dWork , A->sizeOfT() * lwork); if(status2 != hipSuccess) throw cuda_exception::build("svdQR: cuda failed !", status2); signed char jobu, jobvt; if(calcUV) { if(fullUV) jobu = jobvt = 'A'; else jobu = jobvt = 'S'; } else { jobu = jobvt = 'N'; } int *devInfo = nullptr; void* rWork = nullptr; int lda(m), ldu, ldvt; if(calcUV) { ldu = pU->sizeAt(0); ldvt = pVT->sizeAt(0); } PointersManager manager(context, 
"svdQR"); NDArray::prepareSpecialUse({pS, pU, pVT}, {pA}); // choose appropriate cuda gemm api depending on data types if(A->dataType() == DataType::DOUBLE) { status = hipsolverDnDgesvd(*handle, jobu, jobvt, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda, reinterpret_cast<double*>(pS->specialBuffer()), calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : nullptr, ldu, calcUV ? reinterpret_cast<double*>(pVT->specialBuffer()) : nullptr, ldvt, reinterpret_cast<double*>(dWork), lwork, reinterpret_cast<double*>(rWork), devInfo); } else if(A->dataType() == DataType::FLOAT32) { status = hipsolverDnSgesvd(*handle, jobu, jobvt, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda, reinterpret_cast<float*>(pS->specialBuffer()), calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : nullptr, ldu, calcUV ? reinterpret_cast<float*>(pVT->specialBuffer()) : nullptr, ldvt, reinterpret_cast<float*>(dWork), lwork, reinterpret_cast<float*>(rWork), devInfo); } else throw std::invalid_argument("svdQR: given data type is unsupported !"); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdQR: cuda failed !", status); manager.synchronize(); NDArray::registerSpecialUse({pS, pU, pVT}, {pA}); S->assign(pS); if(calcUV) { U->assign(pU); VT->assign(pVT); } for (int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i]; if (devInfo) hipFree(devInfo); if (dWork ) hipFree(dWork); if (rWork) hipFree(rWork); // if(handle) // hipsolverDnDestroy(handle); // hipDeviceReset(); } ////////////////////////////////////////////////////////////////////////// static void svdJcb(sd::LaunchContext* context, const NDArray* A, NDArray* S, NDArray* U, NDArray* V, const bool fullUV, const bool calcUV) { // A [m, n] // S [n] // U [m, m] or [m, n] if fullUV = false and m > n // V [n, n] or [n, m] if fullUV = false and m < n if(A->rankOf() != 2) throw std::runtime_error("svdJcb: rank of A array is not equal 2 !"); int m = A->sizeAt(0); int n = A->sizeAt(1); const int minDim = m < n ? m : n; if(std::vector<Nd4jLong>({minDim}) != S->getShapeAsVector()) throw std::runtime_error("svdJcb: wrong shape of S array !"); if(calcUV) { if(fullUV && std::vector<Nd4jLong>({m,m}) != U->getShapeAsVector()) throw std::runtime_error("svdJcb: wrong shape of U array !"); else if(!fullUV && std::vector<Nd4jLong>({m,minDim}) != U->getShapeAsVector()) throw std::runtime_error("svdJcb: wrong shape of U array !"); if(fullUV && std::vector<Nd4jLong>({n,n}) != V->getShapeAsVector()) throw std::runtime_error("svdJcb: wrong shape of V array !"); else if(!fullUV && std::vector<Nd4jLong>({n,minDim}) != V->getShapeAsVector()) throw std::runtime_error("svdJcb: wrong shape of V array !"); } NDArray* pA = const_cast<NDArray*>(A); const bool aForder = m == 1 || A->strideAt(0) == 1; const bool aCorder = n == 1 || A->strideAt(1) == 1; const bool transA = !aForder && aCorder; const bool dupA = !aForder && !aCorder; std::vector<NDArray*> toDelete; if(dupA) { pA = new NDArray(A->dup('f')); toDelete.push_back(pA); } NDArray* pS = S; if(S->ews() != 1) { pS = new NDArray(S->dup('f')); toDelete.push_back(pS); } NDArray *pU(nullptr), *pV(nullptr); int lda = transA ? pA->strideAt(0) : pA->strideAt(1); int ldu(transA ? n : m), ldv(transA ? m : n); bool uForder(true), vForder(true); if(calcUV) { pU = transA ? V : U; pV = transA ? 
U : V; uForder = pU->sizeAt(0) == 1 || pU->strideAt(0) == 1; vForder = pV->sizeAt(0) == 1 || pV->strideAt(0) == 1; if(!uForder) { pU = new NDArray(pU->dup('f')); toDelete.push_back(pU); } if(!vForder) { pV = new NDArray(pV->dup('f')); toDelete.push_back(pV); } ldu = pU->strideAt(1); ldv = pV->strideAt(1); } std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex()); // create cusolverDn handle hipsolverDnHandle_t* handle = (hipsolverDnHandle_t*)context->getCusolverHandle(); //cusolverStatus_t status = hipsolverDnCreate(&handle); if(handle == nullptr) throw cuda_exception::build("svdJcb: cuda failed !", -1); // stream auto status = hipsolverDnSetStream(*handle, *context->getCudaStream()); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status); // set parameters hipsolverGesvdjInfo_t gesvdjParams = nullptr; status = hipsolverDnCreateGesvdjInfo(&gesvdjParams); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status); status = hipsolverDnXgesvdjSetTolerance(gesvdjParams, 1.e-7); // tolerance if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status); status = hipsolverDnXgesvdjSetMaxSweeps(gesvdjParams, 15); // max_sweeps if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status); int *devInfo = nullptr; const hipsolverEigMode_t jobz = calcUV ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR; const int econ = !fullUV; if(transA) math::nd4j_swap<int>(m, n); // *** avoid bug in cuda API *** void* nullPtr = nullptr; NDArray* arrToAvoidBugInAPI = nullptr; if(!calcUV && m != n) { int maxDim = m > n ? m : n; arrToAvoidBugInAPI = new NDArray('c', {maxDim, maxDim}, pA->dataType(), context); nullPtr = arrToAvoidBugInAPI->specialBuffer(); } // ****************** NDArray::prepareSpecialUse({pS, pU, pV}, {pA}); // query working space of SVD int lwork = 0; if(A->dataType() == DataType::DOUBLE) status = hipsolverDnDgesvdj_bufferSize(*handle, jobz, econ, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda, reinterpret_cast<double*>(pS->specialBuffer()), calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : reinterpret_cast<double*>(nullPtr), ldu, calcUV ? reinterpret_cast<double*>(pV->specialBuffer()) : reinterpret_cast<double*>(nullPtr), ldv, &lwork, gesvdjParams); else if(A->dataType() == DataType::FLOAT32) status = hipsolverDnSgesvdj_bufferSize(*handle, jobz, econ, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda, reinterpret_cast<float*>(pS->specialBuffer()), calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : reinterpret_cast<float*>(nullPtr), ldu, calcUV ? reinterpret_cast<float*>(pV->specialBuffer()) : reinterpret_cast<float*>(nullPtr), ldv, &lwork, gesvdjParams); else throw std::invalid_argument("svdJcb: given data type is unsupported !"); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status); // allocate memory dWork void* dWork = nullptr; auto status2 = hipMalloc((void**)&dWork , A->sizeOfT() * lwork); if(status2 != hipSuccess) throw cuda_exception::build("svdJcb: cuda failed !", status2); PointersManager manager(context, "svdJcb"); // choose appropriate cuda gemm api depending on data types if(A->dataType() == DataType::DOUBLE) { status = hipsolverDnDgesvdj(*handle, jobz, econ, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda, reinterpret_cast<double*>(pS->specialBuffer()), calcUV ? 
reinterpret_cast<double*>(pU->specialBuffer()) : reinterpret_cast<double*>(nullPtr), ldu, calcUV ? reinterpret_cast<double*>(pV->specialBuffer()) : reinterpret_cast<double*>(nullPtr), ldv, reinterpret_cast<double*>(dWork), lwork, devInfo, gesvdjParams); } else if(A->dataType() == DataType::FLOAT32) { status = hipsolverDnSgesvdj(*handle, jobz, econ, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda, reinterpret_cast<float*>(pS->specialBuffer()), calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : reinterpret_cast<float*>(nullPtr), ldu, calcUV ? reinterpret_cast<float*>(pV->specialBuffer()) : reinterpret_cast<float*>(nullPtr), ldv, reinterpret_cast<float*>(dWork), lwork, devInfo, gesvdjParams); } else throw std::invalid_argument("svdJcb: given data type is unsupported !"); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status); manager.synchronize(); NDArray::registerSpecialUse({pS, pU, pV}, {pA}); if(S->ews() != 1) S->assign(pS); if(calcUV) { if(!uForder) U->assign(transA ? pV : pU); if(!vForder) V->assign(transA ? pU : pV); } if(!calcUV && m != n) delete arrToAvoidBugInAPI; for (int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i]; if (devInfo) hipFree(devInfo); if (dWork ) hipFree(dWork); // if(handle) // hipsolverDnDestroy(handle); if(gesvdjParams) hipsolverDnDestroyGesvdjInfo(gesvdjParams); // hipDeviceReset(); } ////////////////////////////////////////////////////////////////////////// static void svdBatched(sd::LaunchContext* context, const NDArray* A, NDArray* S, NDArray* U, NDArray* V, const bool fullUV, const bool calcUV) { // A [..., m, n] // S [..., n] // U [..., m, m] or [..., m, n] if fullUV = false and m > n // V [..., n, n] or [..., n, m] if fullUV = false and m < n auto m = A->sizeAt(-2); auto n = A->sizeAt(-1); const int minDim = m < n ? m : n; const Nd4jLong bS = A->lengthOf() / (m * n); if(m > 32 || n > 32) throw std::runtime_error("svdBatched: numbers of rows and columns should be <= 32 !"); if(minDim != S->sizeAt(-1)) throw std::runtime_error("svdBatched: wrong shape of S array !"); if(calcUV) { if(U->sizeAt(-2) != m) throw std::runtime_error("svdBatched: wrong shape of U array !"); if(U->sizeAt(-1) != (fullUV ? m : minDim)) throw std::runtime_error("svdBatched: wrong shape of U array !"); if(U->lengthOf() / (U->sizeAt(-2) * U->sizeAt(-1)) != bS) throw std::runtime_error("svdBatched: wrong shape of U array !"); if(V->sizeAt(-2) != n) throw std::runtime_error("svdBatched: wrong shape of V array !"); if(V->sizeAt(-1) != (fullUV ? 
n : minDim)) throw std::runtime_error("svdBatched: wrong shape of V array !"); if(V->lengthOf() / (V->sizeAt(-2) * V->sizeAt(-1)) != bS) throw std::runtime_error("svdBatched: wrong shape of V array !"); } NDArray* pA = const_cast<NDArray*>(A); NDArray* pS = S; NDArray* pU = U; NDArray* pV = V; std::vector<NDArray*> toDelete; if(pA->ews() != 1 || pA->ordering() == 'c') { pA = new NDArray(A->dup('f')); toDelete.push_back(pA); } if(S->ews() != 1) { pS = new NDArray(S->dup('f')); toDelete.push_back(pS); } if(calcUV) { if(pU->ews() != 1 || pU->ordering() == 'c') { pU = new NDArray(U->dup('f')); toDelete.push_back(pU); } if(pV->ews() != 1 || pV->ordering() == 'c') { pV = new NDArray(V->dup('f')); toDelete.push_back(pV); } } // create cusolverDn handle hipsolverDnHandle_t handle = nullptr; cusolverStatus_t status = hipsolverDnCreate(&handle); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status); // stream status = hipsolverDnSetStream(handle, *context->getCudaStream()); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status); // set parameters hipsolverGesvdjInfo_t gesvdjParams = nullptr; status = hipsolverDnCreateGesvdjInfo(&gesvdjParams); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status); status = hipsolverDnXgesvdjSetTolerance(gesvdjParams, 1.e-7); // tolerance if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status); status = hipsolverDnXgesvdjSetMaxSweeps(gesvdjParams, 15); // max_sweeps if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status); // devInfo int *devInfo = nullptr; auto status2 = hipMalloc((void**)&devInfo, sizeof(int) * bS); if(status2 != hipSuccess) throw cuda_exception::build("svdBatched: cuda failed !", status2); status2 = hipDeviceSynchronize(); if(status2 != hipSuccess) throw cuda_exception::build("svdJcb: cuda failed !", status2); const hipsolverEigMode_t jobz = calcUV ? HIPSOLVER_EIG_MODE_VECTOR : HIPSOLVER_EIG_MODE_NOVECTOR; int lda(m), ldu, ldv; if(calcUV) { ldu = pU->sizeAt(-2); ldv = pV->sizeAt(-2); } // Ak (i,j) = A[i + 5*j + 25*k] // query working space of SVD int lwork = 0; if(A->dataType() == DataType::DOUBLE) status = hipsolverDnDgesvdjBatched_bufferSize(handle, jobz, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda, reinterpret_cast<double*>(pS->specialBuffer()), calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : nullptr, ldu, calcUV ? reinterpret_cast<double*>(pV->specialBuffer()) : nullptr, ldv, &lwork, gesvdjParams, bS); else if(A->dataType() == DataType::FLOAT32) status = hipsolverDnSgesvdjBatched_bufferSize(handle, jobz, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda, reinterpret_cast<float*>(pS->specialBuffer()), calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : nullptr, ldu, calcUV ? 
reinterpret_cast<float*>(pV->specialBuffer()) : nullptr, ldv, &lwork, gesvdjParams, bS); else throw std::invalid_argument("svdBatched: given data type is unsupported !"); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status); // allocate memory dWork void* dWork = nullptr; status2 = hipMalloc((void**)&dWork , A->sizeOfT() * lwork); if(status2 != hipSuccess) throw cuda_exception::build("svdBatched: cuda failed !", status2); status2 = hipDeviceSynchronize(); if(status2 != hipSuccess) throw cuda_exception::build("svdBatched: cuda failed !", status2); PointersManager manager(context, "svdBatched"); NDArray::prepareSpecialUse({pS, pU, pV}, {pA}); // choose appropriate cuda gemm api depending on data types if(A->dataType() == DataType::DOUBLE) { status = hipsolverDnDgesvdjBatched(handle, jobz, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda, reinterpret_cast<double*>(pS->specialBuffer()), calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : nullptr, ldu, calcUV ? reinterpret_cast<double*>(pV->specialBuffer()) : nullptr, ldv, reinterpret_cast<double*>(dWork), lwork, devInfo, gesvdjParams, bS); } else if(A->dataType() == DataType::FLOAT32) { status = hipsolverDnSgesvdjBatched(handle, jobz, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda, reinterpret_cast<float*>(pS->specialBuffer()), calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : nullptr, ldu, calcUV ? reinterpret_cast<float*>(pV->specialBuffer()) : nullptr, ldv, reinterpret_cast<float*>(dWork), lwork, devInfo, gesvdjParams, bS); } else throw std::invalid_argument("svdBatched: given data type is unsupported !"); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status); manager.synchronize(); NDArray::registerSpecialUse({pS, pU, pV}, {pA}); S->assign(pS); if(calcUV) { U->assign(pU); V->assign(pV); } for (int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i]; if (devInfo) hipFree(devInfo); if (dWork ) hipFree(dWork); if(handle) hipsolverDnDestroy(handle); if(gesvdjParams) hipsolverDnDestroyGesvdjInfo(gesvdjParams); // hipDeviceReset(); } //////////////////////////////////////////////////////////////////// void svd(sd::LaunchContext* context, const NDArray* x, const std::vector<NDArray*>& outArrs, const bool fullUV, const bool calcUV, const int switchNum) { NDArray* S = outArrs[0]; NDArray* U = outArrs[1]; // NDArray VT = outArrs[2]->transpose(); NDArray* V = outArrs[2]; NDArray::prepareSpecialUse({S, U, V}, {x}); if(x->rankOf() == 2) { // svdQR(context, x, S, U, VT, fullUV, calcUV); svdJcb(context, x, S, U, V, fullUV, calcUV); } else { // svdBatched(context, *x, *S, *U, *V, fullUV, calcUV); ResultSet *tadsU(nullptr), *tadsV(nullptr); auto tadsX = x->allTensorsAlongDimension({x->rankOf() - 2, x->rankOf() - 1}); auto tadsS = S->allTensorsAlongDimension({S->rankOf() - 1}); if(calcUV) { tadsU = new ResultSet(U->allTensorsAlongDimension({U->rankOf() - 2, U->rankOf() - 1})); tadsV = new ResultSet(V->allTensorsAlongDimension({V->rankOf() - 2, V->rankOf() - 1})); } for (int i = 0; i < tadsX.size(); ++i) svdJcb(context, tadsX.at(i), tadsS.at(i), calcUV ? tadsU->at(i) : nullptr, calcUV ? tadsV->at(i) : nullptr, fullUV, calcUV); if(calcUV) { delete tadsU; delete tadsV; } } NDArray::registerSpecialUse({S, U, V}, {x}); } } } }
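One detail worth noting in the hipified file above: the batched path still declares its status as cusolverStatus_t while calling hipsolverDnCreate, and the file keeps <cusolverDn.h> and the CUSOLVER_STATUS_* constants. A fully converted declaration, assuming hipSOLVER's Dn compatibility API, would presumably look like the fragment below; this is a sketch of the expected name mapping, not code taken from the original project.

// Hypothetical fully-hipified handle setup for the batched path, assuming the
// hipSOLVER Dn compatibility declarations (hipsolverDnHandle_t, hipsolverStatus_t,
// HIPSOLVER_STATUS_SUCCESS) are in scope.
hipsolverDnHandle_t handle = nullptr;
hipsolverStatus_t status = hipsolverDnCreate(&handle);
if (status != HIPSOLVER_STATUS_SUCCESS)
    throw cuda_exception::build("svdBatched: cuda failed !", status);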
c1b13f13e8f19dbe3d652052d1d75374aac006a8.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]) // #include <helpers/svd.h> #include <cuda_runtime.h> #include <cublas_v2.h> #include <cusolverDn.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ShapeUtils.h> namespace sd { namespace ops { namespace helpers { // FIXME -> we should optimize these helpers for the case when input matrices have c order (perform transpositions appropriately) template <typename T> __global__ static void inverseColumnSignCuda(void* vu, const Nd4jLong* uShapeInfo, void* vv, const Nd4jLong* vShapeInfo) { T* u = reinterpret_cast<T*>(vu); T* v = reinterpret_cast<T*>(vv); __shared__ int rank, uLastButOneColumn, vLastButOneColumn; // uRank = vRank __shared__ Nd4jLong uLen, vLen; __shared__ Nd4jLong *sharedMem; if (threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sharedMem = reinterpret_cast<Nd4jLong*>(shmem); rank = shape::rank(uShapeInfo); uLen = shape::length(uShapeInfo); vLen = shape::length(vShapeInfo); uLastButOneColumn = uShapeInfo[rank] - 2; vLastButOneColumn = vShapeInfo[rank - 1] - 2; } __syncthreads(); const auto ind = threadIdx.x + blockIdx.x * blockDim.x; auto coords = sharedMem + threadIdx.x * rank; // u for (Nd4jLong i = ind; i < uLen; i += gridDim.x * blockDim.x) { shape::index2coords(i, uShapeInfo, coords); if(coords[rank - 1] == 0 || coords[rank - 1] == uLastButOneColumn) // do not change sign in first and last but one columns continue; const auto uOffset = shape::getOffset(uShapeInfo, coords); u[uOffset] = -u[uOffset]; } // v for (Nd4jLong i = ind; i < vLen; i += gridDim.x * blockDim.x) { shape::index2coords(i, vShapeInfo, coords); if(coords[rank - 2] == 0 || coords[rank - 2] == vLastButOneColumn) // do not change sign in first and last but one columns continue; const auto vOffset = shape::getOffset(vShapeInfo, coords); v[vOffset] = -v[vOffset]; } } ////////////////////////////////////////////////////////////////////////// template <typename T> static void inverseColumnSignCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, void* vu, const Nd4jLong* uShapeInfo, void* vv, const Nd4jLong* vShapeInfo) { inverseColumnSignCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vu, uShapeInfo, vv, vShapeInfo); } BUILD_SINGLE_TEMPLATE(template void inverseColumnSignCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t* stream, void* vu, const Nd4jLong* uShapeInfo, void* vv, const Nd4jLong* vShapeInfo), FLOAT_TYPES); ////////////////////////////////////////////////////////////////////////// static void svdQR(sd::LaunchContext* context, const NDArray* A, NDArray* S, NDArray* U, NDArray* VT, const bool fullUV, const bool calcUV) { // since cusa api 
cusolverDnDgesvd/cusolverDnSgesvd have following constrain on input matrix A: A_rows >= A_columns && A_order = 'f' // we make this function to have deal with 2 valid cases only: // 1) A_rows >= A_columns and A_corder = 'f' // 2) A_rows <= A_columns and A_corder = 'c' - int this case perform transposition to get f order // if 1) or 2) are not met then throw exception // A [m, n] // S [n] // U [m, m] or [m, n] if fullUV = false and m > n // VT [n, n] or [m, n] if fullUV = false and m < n if(A->rankOf() != 2) throw std::runtime_error("svdQR: rank of A array is not equal 2 !"); auto m = A->sizeAt(0); auto n = A->sizeAt(1); const int minDim = m < n ? m : n; const char orderA = A->ordering(); if(m < n) throw std::runtime_error("svdQR: due to cuda api input constrains given shape of A array are not valid !"); if(std::vector<Nd4jLong>({minDim}) != S->getShapeAsVector()) throw std::runtime_error("svdQR: wrong shape of S array !"); if(calcUV) { if(fullUV && std::vector<Nd4jLong>({m,m}) != U->getShapeAsVector()) throw std::runtime_error("svdQR: wrong shape of U array !"); else if(!fullUV && std::vector<Nd4jLong>({m,minDim}) != U->getShapeAsVector()) throw std::runtime_error("svdQR: wrong shape of U array !"); if(fullUV && std::vector<Nd4jLong>({n,n}) != VT->getShapeAsVector()) throw std::runtime_error("svdQR: wrong shape of VT array !"); else if(!fullUV && std::vector<Nd4jLong>({minDim,n}) != VT->getShapeAsVector()) throw std::runtime_error("svdQR: wrong shape of VT array !"); } NDArray* pA = const_cast<NDArray*>(A); NDArray* pS = S; NDArray* pU = U; NDArray* pVT = VT; std::vector<NDArray*> toDelete; if(pA->ews() != 1 || pA->ordering() == 'c') { pA = new NDArray(A->dup('f')); toDelete.push_back(pA); } if(S->ews() != 1) { pS = new NDArray(S->dup('f')); toDelete.push_back(pS); } if(calcUV) { if(pU->ews() != 1 || pU->ordering() == 'c') { pU = new NDArray(U->dup('f')); toDelete.push_back(pU); } if(pVT->ews() != 1 || pVT->ordering() == 'c') { pVT = new NDArray(VT->dup('f')); toDelete.push_back(pVT); } } std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex()); // create cusolverDn handle cusolverDnHandle_t* handle = (cusolverDnHandle_t*)context->getCusolverHandle(); //nullptr; //cusolverStatus_t status = cusolverDnCreate(&handle); if(handle == nullptr) throw cuda_exception::build("svdQR: cuda failed !", -1); // stream auto status = cusolverDnSetStream(*handle, *context->getCudaStream()); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdQR: cuda failed !", status); // query working space of SVD int lwork = 0; if(A->dataType() == DataType::DOUBLE) status = cusolverDnDgesvd_bufferSize(*handle, m, n, &lwork); else if(A->dataType() == DataType::FLOAT32) status = cusolverDnSgesvd_bufferSize(*handle, m, n, &lwork); else throw std::invalid_argument("svdQR: given data type is unsupported !"); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdQR: cuda failed !", status); // allocate memory for dWork void* dWork = nullptr; cudaError_t status2 = cudaMalloc((void**)&dWork , A->sizeOfT() * lwork); if(status2 != cudaSuccess) throw cuda_exception::build("svdQR: cuda failed !", status2); signed char jobu, jobvt; if(calcUV) { if(fullUV) jobu = jobvt = 'A'; else jobu = jobvt = 'S'; } else { jobu = jobvt = 'N'; } int *devInfo = nullptr; void* rWork = nullptr; int lda(m), ldu, ldvt; if(calcUV) { ldu = pU->sizeAt(0); ldvt = pVT->sizeAt(0); } PointersManager manager(context, "svdQR"); NDArray::prepareSpecialUse({pS, pU, pVT}, {pA}); // choose appropriate cuda gemm api depending on 
data types if(A->dataType() == DataType::DOUBLE) { status = cusolverDnDgesvd(*handle, jobu, jobvt, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda, reinterpret_cast<double*>(pS->specialBuffer()), calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : nullptr, ldu, calcUV ? reinterpret_cast<double*>(pVT->specialBuffer()) : nullptr, ldvt, reinterpret_cast<double*>(dWork), lwork, reinterpret_cast<double*>(rWork), devInfo); } else if(A->dataType() == DataType::FLOAT32) { status = cusolverDnSgesvd(*handle, jobu, jobvt, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda, reinterpret_cast<float*>(pS->specialBuffer()), calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : nullptr, ldu, calcUV ? reinterpret_cast<float*>(pVT->specialBuffer()) : nullptr, ldvt, reinterpret_cast<float*>(dWork), lwork, reinterpret_cast<float*>(rWork), devInfo); } else throw std::invalid_argument("svdQR: given data type is unsupported !"); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdQR: cuda failed !", status); manager.synchronize(); NDArray::registerSpecialUse({pS, pU, pVT}, {pA}); S->assign(pS); if(calcUV) { U->assign(pU); VT->assign(pVT); } for (int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i]; if (devInfo) cudaFree(devInfo); if (dWork ) cudaFree(dWork); if (rWork) cudaFree(rWork); // if(handle) // cusolverDnDestroy(handle); // cudaDeviceReset(); } ////////////////////////////////////////////////////////////////////////// static void svdJcb(sd::LaunchContext* context, const NDArray* A, NDArray* S, NDArray* U, NDArray* V, const bool fullUV, const bool calcUV) { // A [m, n] // S [n] // U [m, m] or [m, n] if fullUV = false and m > n // V [n, n] or [n, m] if fullUV = false and m < n if(A->rankOf() != 2) throw std::runtime_error("svdJcb: rank of A array is not equal 2 !"); int m = A->sizeAt(0); int n = A->sizeAt(1); const int minDim = m < n ? m : n; if(std::vector<Nd4jLong>({minDim}) != S->getShapeAsVector()) throw std::runtime_error("svdJcb: wrong shape of S array !"); if(calcUV) { if(fullUV && std::vector<Nd4jLong>({m,m}) != U->getShapeAsVector()) throw std::runtime_error("svdJcb: wrong shape of U array !"); else if(!fullUV && std::vector<Nd4jLong>({m,minDim}) != U->getShapeAsVector()) throw std::runtime_error("svdJcb: wrong shape of U array !"); if(fullUV && std::vector<Nd4jLong>({n,n}) != V->getShapeAsVector()) throw std::runtime_error("svdJcb: wrong shape of V array !"); else if(!fullUV && std::vector<Nd4jLong>({n,minDim}) != V->getShapeAsVector()) throw std::runtime_error("svdJcb: wrong shape of V array !"); } NDArray* pA = const_cast<NDArray*>(A); const bool aForder = m == 1 || A->strideAt(0) == 1; const bool aCorder = n == 1 || A->strideAt(1) == 1; const bool transA = !aForder && aCorder; const bool dupA = !aForder && !aCorder; std::vector<NDArray*> toDelete; if(dupA) { pA = new NDArray(A->dup('f')); toDelete.push_back(pA); } NDArray* pS = S; if(S->ews() != 1) { pS = new NDArray(S->dup('f')); toDelete.push_back(pS); } NDArray *pU(nullptr), *pV(nullptr); int lda = transA ? pA->strideAt(0) : pA->strideAt(1); int ldu(transA ? n : m), ldv(transA ? m : n); bool uForder(true), vForder(true); if(calcUV) { pU = transA ? V : U; pV = transA ? 
U : V; uForder = pU->sizeAt(0) == 1 || pU->strideAt(0) == 1; vForder = pV->sizeAt(0) == 1 || pV->strideAt(0) == 1; if(!uForder) { pU = new NDArray(pU->dup('f')); toDelete.push_back(pU); } if(!vForder) { pV = new NDArray(pV->dup('f')); toDelete.push_back(pV); } ldu = pU->strideAt(1); ldv = pV->strideAt(1); } std::lock_guard<std::mutex> lock(*LaunchContext::deviceMutex()); // create cusolverDn handle cusolverDnHandle_t* handle = (cusolverDnHandle_t*)context->getCusolverHandle(); //cusolverStatus_t status = cusolverDnCreate(&handle); if(handle == nullptr) throw cuda_exception::build("svdJcb: cuda failed !", -1); // stream auto status = cusolverDnSetStream(*handle, *context->getCudaStream()); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status); // set parameters gesvdjInfo_t gesvdjParams = nullptr; status = cusolverDnCreateGesvdjInfo(&gesvdjParams); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status); status = cusolverDnXgesvdjSetTolerance(gesvdjParams, 1.e-7); // tolerance if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status); status = cusolverDnXgesvdjSetMaxSweeps(gesvdjParams, 15); // max_sweeps if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status); int *devInfo = nullptr; const cusolverEigMode_t jobz = calcUV ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR; const int econ = !fullUV; if(transA) math::nd4j_swap<int>(m, n); // *** avoid bug in cuda API *** void* nullPtr = nullptr; NDArray* arrToAvoidBugInAPI = nullptr; if(!calcUV && m != n) { int maxDim = m > n ? m : n; arrToAvoidBugInAPI = new NDArray('c', {maxDim, maxDim}, pA->dataType(), context); nullPtr = arrToAvoidBugInAPI->specialBuffer(); } // ****************** NDArray::prepareSpecialUse({pS, pU, pV}, {pA}); // query working space of SVD int lwork = 0; if(A->dataType() == DataType::DOUBLE) status = cusolverDnDgesvdj_bufferSize(*handle, jobz, econ, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda, reinterpret_cast<double*>(pS->specialBuffer()), calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : reinterpret_cast<double*>(nullPtr), ldu, calcUV ? reinterpret_cast<double*>(pV->specialBuffer()) : reinterpret_cast<double*>(nullPtr), ldv, &lwork, gesvdjParams); else if(A->dataType() == DataType::FLOAT32) status = cusolverDnSgesvdj_bufferSize(*handle, jobz, econ, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda, reinterpret_cast<float*>(pS->specialBuffer()), calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : reinterpret_cast<float*>(nullPtr), ldu, calcUV ? reinterpret_cast<float*>(pV->specialBuffer()) : reinterpret_cast<float*>(nullPtr), ldv, &lwork, gesvdjParams); else throw std::invalid_argument("svdJcb: given data type is unsupported !"); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status); // allocate memory dWork void* dWork = nullptr; auto status2 = cudaMalloc((void**)&dWork , A->sizeOfT() * lwork); if(status2 != cudaSuccess) throw cuda_exception::build("svdJcb: cuda failed !", status2); PointersManager manager(context, "svdJcb"); // choose appropriate cuda gemm api depending on data types if(A->dataType() == DataType::DOUBLE) { status = cusolverDnDgesvdj(*handle, jobz, econ, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda, reinterpret_cast<double*>(pS->specialBuffer()), calcUV ? 
reinterpret_cast<double*>(pU->specialBuffer()) : reinterpret_cast<double*>(nullPtr), ldu, calcUV ? reinterpret_cast<double*>(pV->specialBuffer()) : reinterpret_cast<double*>(nullPtr), ldv, reinterpret_cast<double*>(dWork), lwork, devInfo, gesvdjParams); } else if(A->dataType() == DataType::FLOAT32) { status = cusolverDnSgesvdj(*handle, jobz, econ, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda, reinterpret_cast<float*>(pS->specialBuffer()), calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : reinterpret_cast<float*>(nullPtr), ldu, calcUV ? reinterpret_cast<float*>(pV->specialBuffer()) : reinterpret_cast<float*>(nullPtr), ldv, reinterpret_cast<float*>(dWork), lwork, devInfo, gesvdjParams); } else throw std::invalid_argument("svdJcb: given data type is unsupported !"); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdJcb: cuda failed !", status); manager.synchronize(); NDArray::registerSpecialUse({pS, pU, pV}, {pA}); if(S->ews() != 1) S->assign(pS); if(calcUV) { if(!uForder) U->assign(transA ? pV : pU); if(!vForder) V->assign(transA ? pU : pV); } if(!calcUV && m != n) delete arrToAvoidBugInAPI; for (int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i]; if (devInfo) cudaFree(devInfo); if (dWork ) cudaFree(dWork); // if(handle) // cusolverDnDestroy(handle); if(gesvdjParams) cusolverDnDestroyGesvdjInfo(gesvdjParams); // cudaDeviceReset(); } ////////////////////////////////////////////////////////////////////////// static void svdBatched(sd::LaunchContext* context, const NDArray* A, NDArray* S, NDArray* U, NDArray* V, const bool fullUV, const bool calcUV) { // A [..., m, n] // S [..., n] // U [..., m, m] or [..., m, n] if fullUV = false and m > n // V [..., n, n] or [..., n, m] if fullUV = false and m < n auto m = A->sizeAt(-2); auto n = A->sizeAt(-1); const int minDim = m < n ? m : n; const Nd4jLong bS = A->lengthOf() / (m * n); if(m > 32 || n > 32) throw std::runtime_error("svdBatched: numbers of rows and columns should be <= 32 !"); if(minDim != S->sizeAt(-1)) throw std::runtime_error("svdBatched: wrong shape of S array !"); if(calcUV) { if(U->sizeAt(-2) != m) throw std::runtime_error("svdBatched: wrong shape of U array !"); if(U->sizeAt(-1) != (fullUV ? m : minDim)) throw std::runtime_error("svdBatched: wrong shape of U array !"); if(U->lengthOf() / (U->sizeAt(-2) * U->sizeAt(-1)) != bS) throw std::runtime_error("svdBatched: wrong shape of U array !"); if(V->sizeAt(-2) != n) throw std::runtime_error("svdBatched: wrong shape of V array !"); if(V->sizeAt(-1) != (fullUV ? 
n : minDim)) throw std::runtime_error("svdBatched: wrong shape of V array !"); if(V->lengthOf() / (V->sizeAt(-2) * V->sizeAt(-1)) != bS) throw std::runtime_error("svdBatched: wrong shape of V array !"); } NDArray* pA = const_cast<NDArray*>(A); NDArray* pS = S; NDArray* pU = U; NDArray* pV = V; std::vector<NDArray*> toDelete; if(pA->ews() != 1 || pA->ordering() == 'c') { pA = new NDArray(A->dup('f')); toDelete.push_back(pA); } if(S->ews() != 1) { pS = new NDArray(S->dup('f')); toDelete.push_back(pS); } if(calcUV) { if(pU->ews() != 1 || pU->ordering() == 'c') { pU = new NDArray(U->dup('f')); toDelete.push_back(pU); } if(pV->ews() != 1 || pV->ordering() == 'c') { pV = new NDArray(V->dup('f')); toDelete.push_back(pV); } } // create cusolverDn handle cusolverDnHandle_t handle = nullptr; cusolverStatus_t status = cusolverDnCreate(&handle); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status); // stream status = cusolverDnSetStream(handle, *context->getCudaStream()); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status); // set parameters gesvdjInfo_t gesvdjParams = nullptr; status = cusolverDnCreateGesvdjInfo(&gesvdjParams); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status); status = cusolverDnXgesvdjSetTolerance(gesvdjParams, 1.e-7); // tolerance if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status); status = cusolverDnXgesvdjSetMaxSweeps(gesvdjParams, 15); // max_sweeps if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status); // devInfo int *devInfo = nullptr; auto status2 = cudaMalloc((void**)&devInfo, sizeof(int) * bS); if(status2 != cudaSuccess) throw cuda_exception::build("svdBatched: cuda failed !", status2); status2 = cudaDeviceSynchronize(); if(status2 != cudaSuccess) throw cuda_exception::build("svdJcb: cuda failed !", status2); const cusolverEigMode_t jobz = calcUV ? CUSOLVER_EIG_MODE_VECTOR : CUSOLVER_EIG_MODE_NOVECTOR; int lda(m), ldu, ldv; if(calcUV) { ldu = pU->sizeAt(-2); ldv = pV->sizeAt(-2); } // Ak (i,j) = A[i + 5*j + 25*k] // query working space of SVD int lwork = 0; if(A->dataType() == DataType::DOUBLE) status = cusolverDnDgesvdjBatched_bufferSize(handle, jobz, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda, reinterpret_cast<double*>(pS->specialBuffer()), calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : nullptr, ldu, calcUV ? reinterpret_cast<double*>(pV->specialBuffer()) : nullptr, ldv, &lwork, gesvdjParams, bS); else if(A->dataType() == DataType::FLOAT32) status = cusolverDnSgesvdjBatched_bufferSize(handle, jobz, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda, reinterpret_cast<float*>(pS->specialBuffer()), calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : nullptr, ldu, calcUV ? 
reinterpret_cast<float*>(pV->specialBuffer()) : nullptr, ldv, &lwork, gesvdjParams, bS); else throw std::invalid_argument("svdBatched: given data type is unsupported !"); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status); // allocate memory dWork void* dWork = nullptr; status2 = cudaMalloc((void**)&dWork , A->sizeOfT() * lwork); if(status2 != cudaSuccess) throw cuda_exception::build("svdBatched: cuda failed !", status2); status2 = cudaDeviceSynchronize(); if(status2 != cudaSuccess) throw cuda_exception::build("svdBatched: cuda failed !", status2); PointersManager manager(context, "svdBatched"); NDArray::prepareSpecialUse({pS, pU, pV}, {pA}); // choose appropriate cuda gemm api depending on data types if(A->dataType() == DataType::DOUBLE) { status = cusolverDnDgesvdjBatched(handle, jobz, m, n, reinterpret_cast<double*>(pA->specialBuffer()), lda, reinterpret_cast<double*>(pS->specialBuffer()), calcUV ? reinterpret_cast<double*>(pU->specialBuffer()) : nullptr, ldu, calcUV ? reinterpret_cast<double*>(pV->specialBuffer()) : nullptr, ldv, reinterpret_cast<double*>(dWork), lwork, devInfo, gesvdjParams, bS); } else if(A->dataType() == DataType::FLOAT32) { status = cusolverDnSgesvdjBatched(handle, jobz, m, n, reinterpret_cast<float*>(pA->specialBuffer()), lda, reinterpret_cast<float*>(pS->specialBuffer()), calcUV ? reinterpret_cast<float*>(pU->specialBuffer()) : nullptr, ldu, calcUV ? reinterpret_cast<float*>(pV->specialBuffer()) : nullptr, ldv, reinterpret_cast<float*>(dWork), lwork, devInfo, gesvdjParams, bS); } else throw std::invalid_argument("svdBatched: given data type is unsupported !"); if(status != CUSOLVER_STATUS_SUCCESS) throw cuda_exception::build("svdBatched: cuda failed !", status); manager.synchronize(); NDArray::registerSpecialUse({pS, pU, pV}, {pA}); S->assign(pS); if(calcUV) { U->assign(pU); V->assign(pV); } for (int i = toDelete.size() - 1; i >= 0; --i) delete toDelete[i]; if (devInfo) cudaFree(devInfo); if (dWork ) cudaFree(dWork); if(handle) cusolverDnDestroy(handle); if(gesvdjParams) cusolverDnDestroyGesvdjInfo(gesvdjParams); // cudaDeviceReset(); } //////////////////////////////////////////////////////////////////// void svd(sd::LaunchContext* context, const NDArray* x, const std::vector<NDArray*>& outArrs, const bool fullUV, const bool calcUV, const int switchNum) { NDArray* S = outArrs[0]; NDArray* U = outArrs[1]; // NDArray VT = outArrs[2]->transpose(); NDArray* V = outArrs[2]; NDArray::prepareSpecialUse({S, U, V}, {x}); if(x->rankOf() == 2) { // svdQR(context, x, S, U, VT, fullUV, calcUV); svdJcb(context, x, S, U, V, fullUV, calcUV); } else { // svdBatched(context, *x, *S, *U, *V, fullUV, calcUV); ResultSet *tadsU(nullptr), *tadsV(nullptr); auto tadsX = x->allTensorsAlongDimension({x->rankOf() - 2, x->rankOf() - 1}); auto tadsS = S->allTensorsAlongDimension({S->rankOf() - 1}); if(calcUV) { tadsU = new ResultSet(U->allTensorsAlongDimension({U->rankOf() - 2, U->rankOf() - 1})); tadsV = new ResultSet(V->allTensorsAlongDimension({V->rankOf() - 2, V->rankOf() - 1})); } for (int i = 0; i < tadsX.size(); ++i) svdJcb(context, tadsX.at(i), tadsS.at(i), calcUV ? tadsU->at(i) : nullptr, calcUV ? tadsV->at(i) : nullptr, fullUV, calcUV); if(calcUV) { delete tadsU; delete tadsV; } } NDArray::registerSpecialUse({S, U, V}, {x}); } } } }
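The svdJcb helper above drives cuSOLVER's Jacobi SVD (gesvdj). For reference, a condensed, self-contained sketch of that call sequence in isolation follows; it is illustrative only — the function name gesvdjSketch and the fixed leading dimensions are invented here, while the tolerance (1e-7) and max sweeps (15) mirror the helper above. The matrix is assumed to already be in device memory in column-major order.

#include <cuda_runtime.h>
#include <cusolverDn.h>

// Condensed gesvdj call sequence: handle -> params -> bufferSize -> gesvdj -> cleanup.
// d_A is an m x n column-major matrix in device memory (overwritten by the call);
// d_S (min(m,n)), d_U (m x min(m,n)) and d_V (n x min(m,n)) are preallocated outputs.
cusolverStatus_t gesvdjSketch(float* d_A, float* d_S, float* d_U, float* d_V, int m, int n)
{
    cusolverDnHandle_t handle = nullptr;
    gesvdjInfo_t params = nullptr;

    cusolverStatus_t status = cusolverDnCreate(&handle);
    if (status != CUSOLVER_STATUS_SUCCESS) return status;

    status = cusolverDnCreateGesvdjInfo(&params);
    if (status == CUSOLVER_STATUS_SUCCESS) status = cusolverDnXgesvdjSetTolerance(params, 1.e-7); // same tolerance as above
    if (status == CUSOLVER_STATUS_SUCCESS) status = cusolverDnXgesvdjSetMaxSweeps(params, 15);    // same max_sweeps as above

    int lwork = 0;
    if (status == CUSOLVER_STATUS_SUCCESS)
        status = cusolverDnSgesvdj_bufferSize(handle, CUSOLVER_EIG_MODE_VECTOR, /*econ=*/1,
                                              m, n, d_A, m, d_S, d_U, m, d_V, n, &lwork, params);

    if (status == CUSOLVER_STATUS_SUCCESS) {
        float* d_work = nullptr;
        int*   d_info = nullptr;
        cudaMalloc(reinterpret_cast<void**>(&d_work), sizeof(float) * lwork); // allocation checks omitted in this sketch
        cudaMalloc(reinterpret_cast<void**>(&d_info), sizeof(int));
        status = cusolverDnSgesvdj(handle, CUSOLVER_EIG_MODE_VECTOR, /*econ=*/1,
                                   m, n, d_A, m, d_S, d_U, m, d_V, n,
                                   d_work, lwork, d_info, params);
        cudaFree(d_work);
        cudaFree(d_info);
    }

    if (params) cusolverDnDestroyGesvdjInfo(params);
    cusolverDnDestroy(handle);
    return status;
}

Passing econ = 1 requests the thin factors, which corresponds to the fullUV = false branch of the helper above (gesvdj returns V itself, not V^T).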
7a96046b907274625b04eb9c8c79167ee8109c1a.hip
// !!! This is a file automatically generated by hipify!!!
/*!
 * Copyright 2020 XGBoost contributors
 */
#include "proxy_dmatrix.h"
#include "device_adapter_hip.cuh"

namespace xgboost {
namespace data {

void DMatrixProxy::FromCudaColumnar(std::string interface_str) {
  std::shared_ptr<data::CudfAdapter> adapter {new data::CudfAdapter(interface_str)};
  auto const& value = adapter->Value();
  this->batch_ = adapter;
  device_ = adapter->DeviceIdx();
  this->Info().num_col_ = adapter->NumColumns();
  this->Info().num_row_ = adapter->NumRows();
  if (device_ < 0) {
    CHECK_EQ(this->Info().num_row_, 0);
  }
}

void DMatrixProxy::FromCudaArray(std::string interface_str) {
  std::shared_ptr<CupyAdapter> adapter(new CupyAdapter(interface_str));
  this->batch_ = adapter;
  device_ = adapter->DeviceIdx();
  this->Info().num_col_ = adapter->NumColumns();
  this->Info().num_row_ = adapter->NumRows();
  if (device_ < 0) {
    CHECK_EQ(this->Info().num_row_, 0);
  }
}

}  // namespace data
}  // namespace xgboost
7a96046b907274625b04eb9c8c79167ee8109c1a.cu
/*!
 * Copyright 2020 XGBoost contributors
 */
#include "proxy_dmatrix.h"
#include "device_adapter.cuh"

namespace xgboost {
namespace data {

void DMatrixProxy::FromCudaColumnar(std::string interface_str) {
  std::shared_ptr<data::CudfAdapter> adapter {new data::CudfAdapter(interface_str)};
  auto const& value = adapter->Value();
  this->batch_ = adapter;
  device_ = adapter->DeviceIdx();
  this->Info().num_col_ = adapter->NumColumns();
  this->Info().num_row_ = adapter->NumRows();
  if (device_ < 0) {
    CHECK_EQ(this->Info().num_row_, 0);
  }
}

void DMatrixProxy::FromCudaArray(std::string interface_str) {
  std::shared_ptr<CupyAdapter> adapter(new CupyAdapter(interface_str));
  this->batch_ = adapter;
  device_ = adapter->DeviceIdx();
  this->Info().num_col_ = adapter->NumColumns();
  this->Info().num_row_ = adapter->NumRows();
  if (device_ < 0) {
    CHECK_EQ(this->Info().num_row_, 0);
  }
}

}  // namespace data
}  // namespace xgboost
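Purely as an illustration of how the two entry points above differ, a hypothetical caller could dispatch on the kind of serialized interface it holds. FeedProxy and is_columnar are invented names; DMatrixProxy, FromCudaColumnar, FromCudaArray, CudfAdapter and CupyAdapter are the ones defined above, and the sketch assumes proxy_dmatrix.h is available and the methods are publicly callable.

#include <string>
#include "proxy_dmatrix.h"  // declares xgboost::data::DMatrixProxy (assumed available)

// Illustrative dispatch only: a cudf-style columnar description goes through
// FromCudaColumnar, a single __cuda_array_interface__ array through FromCudaArray.
void FeedProxy(xgboost::data::DMatrixProxy* proxy, std::string const& json, bool is_columnar) {
  if (is_columnar) {
    proxy->FromCudaColumnar(json);
  } else {
    proxy->FromCudaArray(json);
  }
}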
3f168124422e17ce0ab06b33e89c4b42b4f48985.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"

#include "IndiceTools_GPU.h"
#include "DomaineMath_GPU.h"
#include "MandelbrotMath.h"

using namespace gpu;

// Note: the choice of the file name is important!
// VagueDevice.cu and not Vague.cu
// In the latter case there is a linkage problem, because the .cu file has the same name as a (host) .cpp file
// We therefore append Device (or anything else) so that the names differ!

/*----------------------------------------------------------------------*\
 |*                            Declaration                              *|
 \*---------------------------------------------------------------------*/

/*--------------------------------------*\
 |*             Imported                *|
 \*-------------------------------------*/

/*--------------------------------------*\
 |*             Public                  *|
 \*-------------------------------------*/

__global__ void mandelbrot(uchar4* ptrDevPixels, uint w, uint h, DomaineMath domaineMath, uint n);

/*--------------------------------------*\
 |*             Private                 *|
 \*-------------------------------------*/

/*----------------------------------------------------------------------*\
 |*                          Implementation                             *|
 \*---------------------------------------------------------------------*/

/*--------------------------------------*\
 |*             Public                  *|
 \*-------------------------------------*/

__global__ void mandelbrot(uchar4* ptrDevPixels, uint w, uint h, DomaineMath domaineMath, uint n)
{
    MandelbrotMath mandelbrotMath = MandelbrotMath(n);

    const int TID = Indice2D::tid();
    const int NB_THREAD = Indice2D::nbThread();
    const int WH = w * h;

    float x; // float here: some precision is lost, processing is faster.
    float y;

    int i; // in [0,h[
    int j; // in [0,w[

    int s = TID; // in [0,...
    while (s < WH)
    {
        IndiceTools::toIJ(s, w, &i, &j); // s in [0,W*H[ --> i in [0,H[, j in [0,W[

        domaineMath.toXY(i, j, &x, &y);

        mandelbrotMath.colorXY(&ptrDevPixels[s], x, y); // update ptrTabPixels[s]

        s += NB_THREAD;
    }
}

/*--------------------------------------*\
 |*             Private                 *|
 \*-------------------------------------*/

/*----------------------------------------------------------------------*\
 |*                               End                                   *|
 \*---------------------------------------------------------------------*/
3f168124422e17ce0ab06b33e89c4b42b4f48985.cu
#include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"

#include "IndiceTools_GPU.h"
#include "DomaineMath_GPU.h"
#include "MandelbrotMath.h"

using namespace gpu;

// Note: the choice of the file name is important!
// VagueDevice.cu and not Vague.cu
// In the latter case there is a linkage problem, because the .cu file has the same name as a (host) .cpp file
// We therefore append Device (or anything else) so that the names differ!

/*----------------------------------------------------------------------*\
 |*                            Declaration                              *|
 \*---------------------------------------------------------------------*/

/*--------------------------------------*\
 |*             Imported                *|
 \*-------------------------------------*/

/*--------------------------------------*\
 |*             Public                  *|
 \*-------------------------------------*/

__global__ void mandelbrot(uchar4* ptrDevPixels, uint w, uint h, DomaineMath domaineMath, uint n);

/*--------------------------------------*\
 |*             Private                 *|
 \*-------------------------------------*/

/*----------------------------------------------------------------------*\
 |*                          Implementation                             *|
 \*---------------------------------------------------------------------*/

/*--------------------------------------*\
 |*             Public                  *|
 \*-------------------------------------*/

__global__ void mandelbrot(uchar4* ptrDevPixels, uint w, uint h, DomaineMath domaineMath, uint n)
{
    MandelbrotMath mandelbrotMath = MandelbrotMath(n);

    const int TID = Indice2D::tid();
    const int NB_THREAD = Indice2D::nbThread();
    const int WH = w * h;

    float x; // float here: some precision is lost, processing is faster.
    float y;

    int i; // in [0,h[
    int j; // in [0,w[

    int s = TID; // in [0,...
    while (s < WH)
    {
        IndiceTools::toIJ(s, w, &i, &j); // s in [0,W*H[ --> i in [0,H[, j in [0,W[

        domaineMath.toXY(i, j, &x, &y);

        mandelbrotMath.colorXY(&ptrDevPixels[s], x, y); // update ptrTabPixels[s]

        s += NB_THREAD;
    }
}

/*--------------------------------------*\
 |*             Private                 *|
 \*-------------------------------------*/

/*----------------------------------------------------------------------*\
 |*                               End                                   *|
 \*---------------------------------------------------------------------*/
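Both Mandelbrot kernels above walk the image with a grid-stride loop over the flattened pixel index s and recover (i, j) via IndiceTools::toIJ. The sketch below is a hypothetical plain-CUDA equivalent of that indexing pattern, without the project's Indice2D/IndiceTools/MandelbrotMath helpers; the colouring is a placeholder, not the real Mandelbrot iteration, and the i = s / w, j = s % w mapping matches the range comment in the kernel above.

#include <cuda_runtime.h>

// Grid-stride loop over a w*h image: each thread handles pixels
// s, s + nbThreads, s + 2*nbThreads, ..., like the kernel above.
__global__ void gridStrideSketch(uchar4* pixels, unsigned w, unsigned h)
{
    const int nbThreads = gridDim.x * blockDim.x;
    const int wh = static_cast<int>(w * h);

    for (int s = blockIdx.x * blockDim.x + threadIdx.x; s < wh; s += nbThreads)
    {
        int i = s / w;   // row    in [0, h[
        int j = s % w;   // column in [0, w[

        // Placeholder colouring: real code maps (i, j) to (x, y) in the
        // mathematical domain and runs the Mandelbrot iteration instead.
        pixels[s] = make_uchar4((unsigned char)(j & 0xFF), (unsigned char)(i & 0xFF), 0, 255);
    }
}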
b5066d047a364ccbc8662a96d3f7113fbee5386e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/reduce.hpp" #include "opencv2/core/cuda/functional.hpp" #include "opencv2/core/cuda/warp_shuffle.hpp" namespace cv { namespace cuda { namespace device { namespace hog { __constant__ int cnbins; __constant__ int cblock_stride_x; __constant__ int cblock_stride_y; __constant__ int cnblocks_win_x; __constant__ int cnblocks_win_y; __constant__ int cncells_block_x; __constant__ int cncells_block_y; __constant__ int cblock_hist_size; __constant__ int cblock_hist_size_2up; __constant__ int cdescr_size; __constant__ int cdescr_width; /* Returns the nearest upper power of two, works only for the typical GPU thread count (pert block) values */ int power_2up(unsigned int n) { if (n <= 1) return 1; else if (n <= 2) return 2; else if (n <= 4) return 4; else if (n <= 8) return 8; else if (n <= 16) return 16; else if (n <= 32) return 32; else if (n <= 64) return 64; else if (n <= 128) return 128; else if (n <= 256) return 256; else if (n <= 512) return 512; else if (n <= 1024) return 1024; return -1; // Input is too big } /* Returns the max size for nblocks */ int max_nblocks(int nthreads, int ncells_block = 1) { int threads = nthreads * ncells_block; if(threads * 4 <= 256) return 4; else if(threads * 3 <= 256) return 3; else if(threads * 2 <= 256) return 2; else return 1; } void set_up_constants(int nbins, int block_stride_x, int block_stride_y, int nblocks_win_x, int nblocks_win_y, int ncells_block_x, int ncells_block_y, const hipStream_t& stream) { cudaSafeCall(hipMemcpyToSymbolAsync(cnbins, &nbins, sizeof(nbins), 0, hipMemcpyHostToDevice, stream)); cudaSafeCall(hipMemcpyToSymbolAsync(cblock_stride_x, &block_stride_x, sizeof(block_stride_x), 0, hipMemcpyHostToDevice, stream)); cudaSafeCall(hipMemcpyToSymbolAsync(cblock_stride_y, &block_stride_y, sizeof(block_stride_y), 0, hipMemcpyHostToDevice, stream)); cudaSafeCall(hipMemcpyToSymbolAsync(cnblocks_win_x, &nblocks_win_x, sizeof(nblocks_win_x), 0, hipMemcpyHostToDevice, stream)); cudaSafeCall(hipMemcpyToSymbolAsync(cnblocks_win_y, &nblocks_win_y, sizeof(nblocks_win_y), 0, hipMemcpyHostToDevice, stream)); cudaSafeCall(hipMemcpyToSymbolAsync(cncells_block_x, &ncells_block_x, sizeof(ncells_block_x), 0, hipMemcpyHostToDevice, stream)); cudaSafeCall(hipMemcpyToSymbolAsync(cncells_block_y, &ncells_block_y, sizeof(ncells_block_y), 0, hipMemcpyHostToDevice, stream)); int block_hist_size = nbins * ncells_block_x * ncells_block_y; cudaSafeCall(hipMemcpyToSymbolAsync(cblock_hist_size, &block_hist_size, sizeof(block_hist_size), 0, hipMemcpyHostToDevice, stream)); int block_hist_size_2up = power_2up(block_hist_size); cudaSafeCall(hipMemcpyToSymbolAsync(cblock_hist_size_2up, &block_hist_size_2up, sizeof(block_hist_size_2up), 0, hipMemcpyHostToDevice, stream)); int descr_width = nblocks_win_x * block_hist_size; cudaSafeCall(hipMemcpyToSymbolAsync(cdescr_width, &descr_width, sizeof(descr_width), 0, hipMemcpyHostToDevice, stream)); int descr_size = descr_width * nblocks_win_y; cudaSafeCall(hipMemcpyToSymbolAsync(cdescr_size, &descr_size, sizeof(descr_size), 0, hipMemcpyHostToDevice, stream)); } //---------------------------------------------------------------------------- // Histogram computation // // CUDA kernel to compute the histograms template <int nblocks> // Number of histogram blocks processed by single GPU thread block __global__ void compute_hists_kernel_many_blocks(const int img_block_width, const PtrStepf grad, const PtrStepb qangle, float scale, float* 
block_hists, int cell_size, int patch_size, int block_patch_size, int threads_cell, int threads_block, int half_cell_size) { const int block_x = threadIdx.z; const int cell_x = threadIdx.x / threads_cell; const int cell_y = threadIdx.y; const int cell_thread_x = threadIdx.x & (threads_cell - 1); if (blockIdx.x * blockDim.z + block_x >= img_block_width) return; extern __shared__ float smem[]; float* hists = smem; float* final_hist = smem + cnbins * block_patch_size * nblocks; // patch_size means that patch_size pixels affect on block's cell if (cell_thread_x < patch_size) { const int offset_x = (blockIdx.x * blockDim.z + block_x) * cblock_stride_x + half_cell_size * cell_x + cell_thread_x; const int offset_y = blockIdx.y * cblock_stride_y + half_cell_size * cell_y; const float* grad_ptr = grad.ptr(offset_y) + offset_x * 2; const unsigned char* qangle_ptr = qangle.ptr(offset_y) + offset_x * 2; float* hist = hists + patch_size * (cell_y * blockDim.z * cncells_block_y + cell_x + block_x * cncells_block_x) + cell_thread_x; for (int bin_id = 0; bin_id < cnbins; ++bin_id) hist[bin_id * block_patch_size * nblocks] = 0.f; //(dist_x, dist_y) : distance between current pixel in patch and cell's center const int dist_x = -half_cell_size + (int)cell_thread_x - half_cell_size * cell_x; const int dist_y_begin = -half_cell_size - half_cell_size * (int)threadIdx.y; for (int dist_y = dist_y_begin; dist_y < dist_y_begin + patch_size; ++dist_y) { float2 vote = *(const float2*)grad_ptr; uchar2 bin = *(const uchar2*)qangle_ptr; grad_ptr += grad.step/sizeof(float); qangle_ptr += qangle.step; //(dist_center_x, dist_center_y) : distance between current pixel in patch and block's center int dist_center_y = dist_y - half_cell_size * (1 - 2 * cell_y); int dist_center_x = dist_x - half_cell_size * (1 - 2 * cell_x); float gaussian = ::expf(-(dist_center_y * dist_center_y + dist_center_x * dist_center_x) * scale); float interp_weight = ((float)cell_size - ::fabs(dist_y + 0.5f)) * ((float)cell_size - ::fabs(dist_x + 0.5f)) / (float)threads_block; hist[bin.x * block_patch_size * nblocks] += gaussian * interp_weight * vote.x; hist[bin.y * block_patch_size * nblocks] += gaussian * interp_weight * vote.y; } //reduction of the histograms volatile float* hist_ = hist; for (int bin_id = 0; bin_id < cnbins; ++bin_id, hist_ += block_patch_size * nblocks) { if (cell_thread_x < patch_size/2) hist_[0] += hist_[patch_size/2]; if (cell_thread_x < patch_size/4 && (!((patch_size/4) < 3 && cell_thread_x == 0))) hist_[0] += hist_[patch_size/4]; if (cell_thread_x == 0) final_hist[((cell_x + block_x * cncells_block_x) * cncells_block_y + cell_y) * cnbins + bin_id] = hist_[0] + hist_[1] + hist_[2]; } } __syncthreads(); float* block_hist = block_hists + (blockIdx.y * img_block_width + blockIdx.x * blockDim.z + block_x) * cblock_hist_size; //copying from final_hist to block_hist int tid; if(threads_cell < cnbins) { tid = (cell_y * cncells_block_y + cell_x) * cnbins + cell_thread_x; } else { tid = (cell_y * cncells_block_y + cell_x) * threads_cell + cell_thread_x; } if (tid < cblock_hist_size) { block_hist[tid] = final_hist[block_x * cblock_hist_size + tid]; if(threads_cell < cnbins && cell_thread_x == (threads_cell-1)) { for(int i=1;i<=(cnbins - threads_cell);++i) { block_hist[tid + i] = final_hist[block_x * cblock_hist_size + tid + i]; } } } } //declaration of variables and invoke the kernel with the calculated number of blocks void compute_hists(int nbins, int block_stride_x, int block_stride_y, int height, int width, const PtrStepSzf& grad, 
const PtrStepSzb& qangle, float sigma, float* block_hists, int cell_size_x, int cell_size_y, int ncells_block_x, int ncells_block_y, const hipStream_t& stream) { const int ncells_block = ncells_block_x * ncells_block_y; const int patch_side = cell_size_x / 4; const int patch_size = cell_size_x + (patch_side * 2); const int block_patch_size = ncells_block * patch_size; const int threads_cell = power_2up(patch_size); const int threads_block = ncells_block * threads_cell; const int half_cell_size = cell_size_x / 2; int img_block_width = (width - ncells_block_x * cell_size_x + block_stride_x) / block_stride_x; int img_block_height = (height - ncells_block_y * cell_size_y + block_stride_y) / block_stride_y; const int nblocks = max_nblocks(threads_cell, ncells_block); dim3 grid(divUp(img_block_width, nblocks), img_block_height); dim3 threads(threads_cell * ncells_block_x, ncells_block_y, nblocks); // Precompute gaussian spatial window parameter float scale = 1.f / (2.f * sigma * sigma); int hists_size = (nbins * ncells_block * patch_size * nblocks) * sizeof(float); int final_hists_size = (nbins * ncells_block * nblocks) * sizeof(float); int smem = hists_size + final_hists_size; if (nblocks == 4) hipLaunchKernelGGL(( compute_hists_kernel_many_blocks<4>), dim3(grid), dim3(threads), smem, stream, img_block_width, grad, qangle, scale, block_hists, cell_size_x, patch_size, block_patch_size, threads_cell, threads_block, half_cell_size); else if (nblocks == 3) hipLaunchKernelGGL(( compute_hists_kernel_many_blocks<3>), dim3(grid), dim3(threads), smem, stream, img_block_width, grad, qangle, scale, block_hists, cell_size_x, patch_size, block_patch_size, threads_cell, threads_block, half_cell_size); else if (nblocks == 2) hipLaunchKernelGGL(( compute_hists_kernel_many_blocks<2>), dim3(grid), dim3(threads), smem, stream, img_block_width, grad, qangle, scale, block_hists, cell_size_x, patch_size, block_patch_size, threads_cell, threads_block, half_cell_size); else hipLaunchKernelGGL(( compute_hists_kernel_many_blocks<1>), dim3(grid), dim3(threads), smem, stream, img_block_width, grad, qangle, scale, block_hists, cell_size_x, patch_size, block_patch_size, threads_cell, threads_block, half_cell_size); cudaSafeCall( hipGetLastError() ); } //------------------------------------------------------------- // Normalization of histograms via L2Hys_norm // template<int size> __device__ float reduce_smem(float* smem, float val) { unsigned int tid = threadIdx.x; float sum = val; reduce<size>(smem, sum, tid, plus<float>()); if (size == 32) { #if __CUDA_ARCH__ >= 300 return shfl(sum, 0); #else return smem[0]; #endif } else { #if __CUDA_ARCH__ >= 300 if (threadIdx.x == 0) smem[0] = sum; #endif __syncthreads(); return smem[0]; } } template <int nthreads, // Number of threads which process one block historgam int nblocks> // Number of block hisograms processed by one GPU thread block __global__ void normalize_hists_kernel_many_blocks(const int block_hist_size, const int img_block_width, float* block_hists, float threshold) { if (blockIdx.x * blockDim.z + threadIdx.z >= img_block_width) return; float* hist = block_hists + (blockIdx.y * img_block_width + blockIdx.x * blockDim.z + threadIdx.z) * block_hist_size + threadIdx.x; __shared__ float sh_squares[nthreads * nblocks]; float* squares = sh_squares + threadIdx.z * nthreads; float elem = 0.f; if (threadIdx.x < block_hist_size) elem = hist[0]; __syncthreads(); // prevent race condition (redundant?) 
float sum = reduce_smem<nthreads>(squares, elem * elem); float scale = 1.0f / (::sqrtf(sum) + 0.1f * block_hist_size); elem = ::min(elem * scale, threshold); __syncthreads(); // prevent race condition sum = reduce_smem<nthreads>(squares, elem * elem); scale = 1.0f / (::sqrtf(sum) + 1e-3f); if (threadIdx.x < block_hist_size) hist[0] = elem * scale; } void normalize_hists(int nbins, int block_stride_x, int block_stride_y, int height, int width, float* block_hists, float threshold, int cell_size_x, int cell_size_y, int ncells_block_x, int ncells_block_y, const hipStream_t& stream) { const int nblocks = 1; int block_hist_size = nbins * ncells_block_x * ncells_block_y; int nthreads = power_2up(block_hist_size); dim3 threads(nthreads, 1, nblocks); int img_block_width = (width - ncells_block_x * cell_size_x + block_stride_x) / block_stride_x; int img_block_height = (height - ncells_block_y * cell_size_y + block_stride_y) / block_stride_y; dim3 grid(divUp(img_block_width, nblocks), img_block_height); if (nthreads == 32) hipLaunchKernelGGL(( normalize_hists_kernel_many_blocks<32, nblocks>), dim3(grid), dim3(threads), 0, stream, block_hist_size, img_block_width, block_hists, threshold); else if (nthreads == 64) hipLaunchKernelGGL(( normalize_hists_kernel_many_blocks<64, nblocks>), dim3(grid), dim3(threads), 0, stream, block_hist_size, img_block_width, block_hists, threshold); else if (nthreads == 128) hipLaunchKernelGGL(( normalize_hists_kernel_many_blocks<128, nblocks>), dim3(grid), dim3(threads), 0, stream, block_hist_size, img_block_width, block_hists, threshold); else if (nthreads == 256) hipLaunchKernelGGL(( normalize_hists_kernel_many_blocks<256, nblocks>), dim3(grid), dim3(threads), 0, stream, block_hist_size, img_block_width, block_hists, threshold); else if (nthreads == 512) hipLaunchKernelGGL(( normalize_hists_kernel_many_blocks<512, nblocks>), dim3(grid), dim3(threads), 0, stream, block_hist_size, img_block_width, block_hists, threshold); else CV_Error(cv::Error::StsBadArg, "normalize_hists: histogram's size is too big, try to decrease number of bins"); cudaSafeCall( hipGetLastError() ); } //--------------------------------------------------------------------- // Linear SVM based classification // // return confidence values not just positive location template <int nthreads, // Number of threads per one histogram block int nblocks> // Number of histogram block processed by single GPU thread block __global__ void compute_confidence_hists_kernel_many_blocks(const int img_win_width, const int img_block_width, const int win_block_stride_x, const int win_block_stride_y, const float* block_hists, const float* coefs, float free_coef, float threshold, float* confidences) { const int win_x = threadIdx.z; if (blockIdx.x * blockDim.z + win_x >= img_win_width) return; const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width + blockIdx.x * win_block_stride_x * blockDim.z + win_x) * cblock_hist_size; float product = 0.f; for (int i = threadIdx.x; i < cdescr_size; i += nthreads) { int offset_y = i / cdescr_width; int offset_x = i - offset_y * cdescr_width; product += coefs[i] * hist[offset_y * img_block_width * cblock_hist_size + offset_x]; } __shared__ float products[nthreads * nblocks]; const int tid = threadIdx.z * nthreads + threadIdx.x; reduce<nthreads>(products, product, tid, plus<float>()); if (threadIdx.x == 0) confidences[blockIdx.y * img_win_width + blockIdx.x * blockDim.z + win_x] = product + free_coef; } void compute_confidence_hists(int win_height, int win_width, 
int block_stride_y, int block_stride_x, int win_stride_y, int win_stride_x, int height, int width, float* block_hists, float* coefs, float free_coef, float threshold, int cell_size_x, int ncells_block_x, float *confidences) { const int nthreads = 256; const int nblocks = 1; int win_block_stride_x = win_stride_x / block_stride_x; int win_block_stride_y = win_stride_y / block_stride_y; int img_win_width = (width - win_width + win_stride_x) / win_stride_x; int img_win_height = (height - win_height + win_stride_y) / win_stride_y; dim3 threads(nthreads, 1, nblocks); dim3 grid(divUp(img_win_width, nblocks), img_win_height); cudaSafeCall(hipFuncSetCacheConfig(compute_confidence_hists_kernel_many_blocks<nthreads, nblocks>, hipFuncCachePreferL1)); int img_block_width = (width - ncells_block_x * cell_size_x + block_stride_x) / block_stride_x; hipLaunchKernelGGL(( compute_confidence_hists_kernel_many_blocks<nthreads, nblocks>), dim3(grid), dim3(threads), 0, 0, img_win_width, img_block_width, win_block_stride_x, win_block_stride_y, block_hists, coefs, free_coef, threshold, confidences); cudaSafeCall(hipDeviceSynchronize()); } template <int nthreads, // Number of threads per one histogram block int nblocks> // Number of histogram block processed by single GPU thread block __global__ void classify_hists_kernel_many_blocks(const int img_win_width, const int img_block_width, const int win_block_stride_x, const int win_block_stride_y, const float* block_hists, const float* coefs, float free_coef, float threshold, unsigned char* labels) { const int win_x = threadIdx.z; if (blockIdx.x * blockDim.z + win_x >= img_win_width) return; const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width + blockIdx.x * win_block_stride_x * blockDim.z + win_x) * cblock_hist_size; float product = 0.f; for (int i = threadIdx.x; i < cdescr_size; i += nthreads) { int offset_y = i / cdescr_width; int offset_x = i - offset_y * cdescr_width; product += coefs[i] * hist[offset_y * img_block_width * cblock_hist_size + offset_x]; } __shared__ float products[nthreads * nblocks]; const int tid = threadIdx.z * nthreads + threadIdx.x; reduce<nthreads>(products, product, tid, plus<float>()); if (threadIdx.x == 0) labels[blockIdx.y * img_win_width + blockIdx.x * blockDim.z + win_x] = (product + free_coef >= threshold); } void classify_hists(int win_height, int win_width, int block_stride_y, int block_stride_x, int win_stride_y, int win_stride_x, int height, int width, float* block_hists, float* coefs, float free_coef, float threshold, int cell_size_x, int ncells_block_x, unsigned char* labels) { const int nthreads = 256; const int nblocks = 1; int win_block_stride_x = win_stride_x / block_stride_x; int win_block_stride_y = win_stride_y / block_stride_y; int img_win_width = (width - win_width + win_stride_x) / win_stride_x; int img_win_height = (height - win_height + win_stride_y) / win_stride_y; dim3 threads(nthreads, 1, nblocks); dim3 grid(divUp(img_win_width, nblocks), img_win_height); cudaSafeCall(hipFuncSetCacheConfig(classify_hists_kernel_many_blocks<nthreads, nblocks>, hipFuncCachePreferL1)); int img_block_width = (width - ncells_block_x * cell_size_x + block_stride_x) / block_stride_x; hipLaunchKernelGGL(( classify_hists_kernel_many_blocks<nthreads, nblocks>), dim3(grid), dim3(threads), 0, 0, img_win_width, img_block_width, win_block_stride_x, win_block_stride_y, block_hists, coefs, free_coef, threshold, labels); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } 
//---------------------------------------------------------------------------- // Extract descriptors template <int nthreads> __global__ void extract_descrs_by_rows_kernel(const int img_block_width, const int win_block_stride_x, const int win_block_stride_y, const float* block_hists, PtrStepf descriptors) { // Get left top corner of the window in src const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width + blockIdx.x * win_block_stride_x) * cblock_hist_size; // Get left top corner of the window in dst float* descriptor = descriptors.ptr(blockIdx.y * gridDim.x + blockIdx.x); // Copy elements from src to dst for (int i = threadIdx.x; i < cdescr_size; i += nthreads) { int offset_y = i / cdescr_width; int offset_x = i - offset_y * cdescr_width; descriptor[i] = hist[offset_y * img_block_width * cblock_hist_size + offset_x]; } } void extract_descrs_by_rows(int win_height, int win_width, int block_stride_y, int block_stride_x, int win_stride_y, int win_stride_x, int height, int width, float* block_hists, int cell_size_x, int ncells_block_x, PtrStepSzf descriptors, const hipStream_t& stream) { const int nthreads = 256; int win_block_stride_x = win_stride_x / block_stride_x; int win_block_stride_y = win_stride_y / block_stride_y; int img_win_width = (width - win_width + win_stride_x) / win_stride_x; int img_win_height = (height - win_height + win_stride_y) / win_stride_y; dim3 threads(nthreads, 1); dim3 grid(img_win_width, img_win_height); int img_block_width = (width - ncells_block_x * cell_size_x + block_stride_x) / block_stride_x; hipLaunchKernelGGL(( extract_descrs_by_rows_kernel<nthreads>), dim3(grid), dim3(threads), 0, stream, img_block_width, win_block_stride_x, win_block_stride_y, block_hists, descriptors); cudaSafeCall( hipGetLastError() ); } template <int nthreads> __global__ void extract_descrs_by_cols_kernel(const int img_block_width, const int win_block_stride_x, const int win_block_stride_y, const float* block_hists, PtrStepf descriptors) { // Get left top corner of the window in src const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width + blockIdx.x * win_block_stride_x) * cblock_hist_size; // Get left top corner of the window in dst float* descriptor = descriptors.ptr(blockIdx.y * gridDim.x + blockIdx.x); // Copy elements from src to dst for (int i = threadIdx.x; i < cdescr_size; i += nthreads) { int block_idx = i / cblock_hist_size; int idx_in_block = i - block_idx * cblock_hist_size; int y = block_idx / cnblocks_win_x; int x = block_idx - y * cnblocks_win_x; descriptor[(x * cnblocks_win_y + y) * cblock_hist_size + idx_in_block] = hist[(y * img_block_width + x) * cblock_hist_size + idx_in_block]; } } void extract_descrs_by_cols(int win_height, int win_width, int block_stride_y, int block_stride_x, int win_stride_y, int win_stride_x, int height, int width, float* block_hists, int cell_size_x, int ncells_block_x, PtrStepSzf descriptors, const hipStream_t& stream) { const int nthreads = 256; int win_block_stride_x = win_stride_x / block_stride_x; int win_block_stride_y = win_stride_y / block_stride_y; int img_win_width = (width - win_width + win_stride_x) / win_stride_x; int img_win_height = (height - win_height + win_stride_y) / win_stride_y; dim3 threads(nthreads, 1); dim3 grid(img_win_width, img_win_height); int img_block_width = (width - ncells_block_x * cell_size_x + block_stride_x) / block_stride_x; hipLaunchKernelGGL(( extract_descrs_by_cols_kernel<nthreads>), dim3(grid), dim3(threads), 0, stream, img_block_width, 
win_block_stride_x, win_block_stride_y, block_hists, descriptors); cudaSafeCall( hipGetLastError() ); } //---------------------------------------------------------------------------- // Gradients computation template <int nthreads, int correct_gamma> __global__ void compute_gradients_8UC4_kernel(int height, int width, const PtrStepb img, float angle_scale, PtrStepf grad, PtrStepb qangle) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const uchar4* row = (const uchar4*)img.ptr(blockIdx.y); __shared__ float sh_row[(nthreads + 2) * 3]; uchar4 val; if (x < width) val = row[x]; else val = row[width - 2]; sh_row[threadIdx.x + 1] = val.x; sh_row[threadIdx.x + 1 + (nthreads + 2)] = val.y; sh_row[threadIdx.x + 1 + 2 * (nthreads + 2)] = val.z; if (threadIdx.x == 0) { val = row[::max(x - 1, 1)]; sh_row[0] = val.x; sh_row[(nthreads + 2)] = val.y; sh_row[2 * (nthreads + 2)] = val.z; } if (threadIdx.x == blockDim.x - 1) { val = row[::min(x + 1, width - 2)]; sh_row[blockDim.x + 1] = val.x; sh_row[blockDim.x + 1 + (nthreads + 2)] = val.y; sh_row[blockDim.x + 1 + 2 * (nthreads + 2)] = val.z; } __syncthreads(); if (x < width) { float3 a, b; b.x = sh_row[threadIdx.x + 2]; b.y = sh_row[threadIdx.x + 2 + (nthreads + 2)]; b.z = sh_row[threadIdx.x + 2 + 2 * (nthreads + 2)]; a.x = sh_row[threadIdx.x]; a.y = sh_row[threadIdx.x + (nthreads + 2)]; a.z = sh_row[threadIdx.x + 2 * (nthreads + 2)]; float3 dx; if (correct_gamma) dx = make_float3(::sqrtf(b.x) - ::sqrtf(a.x), ::sqrtf(b.y) - ::sqrtf(a.y), ::sqrtf(b.z) - ::sqrtf(a.z)); else dx = make_float3(b.x - a.x, b.y - a.y, b.z - a.z); float3 dy = make_float3(0.f, 0.f, 0.f); if (blockIdx.y > 0 && blockIdx.y < height - 1) { val = ((const uchar4*)img.ptr(blockIdx.y - 1))[x]; a = make_float3(val.x, val.y, val.z); val = ((const uchar4*)img.ptr(blockIdx.y + 1))[x]; b = make_float3(val.x, val.y, val.z); if (correct_gamma) dy = make_float3(::sqrtf(b.x) - ::sqrtf(a.x), ::sqrtf(b.y) - ::sqrtf(a.y), ::sqrtf(b.z) - ::sqrtf(a.z)); else dy = make_float3(b.x - a.x, b.y - a.y, b.z - a.z); } float best_dx = dx.x; float best_dy = dy.x; float mag0 = dx.x * dx.x + dy.x * dy.x; float mag1 = dx.y * dx.y + dy.y * dy.y; if (mag0 < mag1) { best_dx = dx.y; best_dy = dy.y; mag0 = mag1; } mag1 = dx.z * dx.z + dy.z * dy.z; if (mag0 < mag1) { best_dx = dx.z; best_dy = dy.z; mag0 = mag1; } mag0 = ::sqrtf(mag0); float ang = (::atan2f(best_dy, best_dx) + CV_PI_F) * angle_scale - 0.5f; int hidx = (int)::floorf(ang); ang -= hidx; hidx = (hidx + cnbins) % cnbins; ((uchar2*)qangle.ptr(blockIdx.y))[x] = make_uchar2(hidx, (hidx + 1) % cnbins); ((float2*)grad.ptr(blockIdx.y))[x] = make_float2(mag0 * (1.f - ang), mag0 * ang); } } void compute_gradients_8UC4(int nbins, int height, int width, const PtrStepSzb& img, float angle_scale, PtrStepSzf grad, PtrStepSzb qangle, bool correct_gamma, const hipStream_t& stream) { CV_UNUSED(nbins); const int nthreads = 256; dim3 bdim(nthreads, 1); dim3 gdim(divUp(width, bdim.x), divUp(height, bdim.y)); if (correct_gamma) hipLaunchKernelGGL(( compute_gradients_8UC4_kernel<nthreads, 1>), dim3(gdim), dim3(bdim), 0, stream, height, width, img, angle_scale, grad, qangle); else hipLaunchKernelGGL(( compute_gradients_8UC4_kernel<nthreads, 0>), dim3(gdim), dim3(bdim), 0, stream, height, width, img, angle_scale, grad, qangle); cudaSafeCall( hipGetLastError() ); } template <int nthreads, int correct_gamma> __global__ void compute_gradients_8UC1_kernel(int height, int width, const PtrStepb img, float angle_scale, PtrStepf grad, PtrStepb qangle) { const int x = blockIdx.x * 
blockDim.x + threadIdx.x; const unsigned char* row = (const unsigned char*)img.ptr(blockIdx.y); __shared__ float sh_row[nthreads + 2]; if (x < width) sh_row[threadIdx.x + 1] = row[x]; else sh_row[threadIdx.x + 1] = row[width - 2]; if (threadIdx.x == 0) sh_row[0] = row[::max(x - 1, 1)]; if (threadIdx.x == blockDim.x - 1) sh_row[blockDim.x + 1] = row[::min(x + 1, width - 2)]; __syncthreads(); if (x < width) { float dx; if (correct_gamma) dx = ::sqrtf(sh_row[threadIdx.x + 2]) - ::sqrtf(sh_row[threadIdx.x]); else dx = sh_row[threadIdx.x + 2] - sh_row[threadIdx.x]; float dy = 0.f; if (blockIdx.y > 0 && blockIdx.y < height - 1) { float a = ((const unsigned char*)img.ptr(blockIdx.y + 1))[x]; float b = ((const unsigned char*)img.ptr(blockIdx.y - 1))[x]; if (correct_gamma) dy = ::sqrtf(a) - ::sqrtf(b); else dy = a - b; } float mag = ::sqrtf(dx * dx + dy * dy); float ang = (::atan2f(dy, dx) + CV_PI_F) * angle_scale - 0.5f; int hidx = (int)::floorf(ang); ang -= hidx; hidx = (hidx + cnbins) % cnbins; ((uchar2*)qangle.ptr(blockIdx.y))[x] = make_uchar2(hidx, (hidx + 1) % cnbins); ((float2*) grad.ptr(blockIdx.y))[x] = make_float2(mag * (1.f - ang), mag * ang); } } void compute_gradients_8UC1(int nbins, int height, int width, const PtrStepSzb& img, float angle_scale, PtrStepSzf grad, PtrStepSzb qangle, bool correct_gamma, const hipStream_t& stream) { CV_UNUSED(nbins); const int nthreads = 256; dim3 bdim(nthreads, 1); dim3 gdim(divUp(width, bdim.x), divUp(height, bdim.y)); if (correct_gamma) hipLaunchKernelGGL(( compute_gradients_8UC1_kernel<nthreads, 1>), dim3(gdim), dim3(bdim), 0, stream, height, width, img, angle_scale, grad, qangle); else hipLaunchKernelGGL(( compute_gradients_8UC1_kernel<nthreads, 0>), dim3(gdim), dim3(bdim), 0, stream, height, width, img, angle_scale, grad, qangle); cudaSafeCall( hipGetLastError() ); } //------------------------------------------------------------------- // Resize texture<uchar4, 2, hipReadModeNormalizedFloat> resize8UC4_tex; texture<uchar, 2, hipReadModeNormalizedFloat> resize8UC1_tex; __global__ void resize_for_hog_kernel(float sx, float sy, PtrStepSz<uchar> dst, int colOfs) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) dst.ptr(y)[x] = tex2D(resize8UC1_tex, x * sx + colOfs, y * sy) * 255; } __global__ void resize_for_hog_kernel(float sx, float sy, PtrStepSz<uchar4> dst, int colOfs) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float4 val = tex2D(resize8UC4_tex, x * sx + colOfs, y * sy); dst.ptr(y)[x] = make_uchar4(val.x * 255, val.y * 255, val.z * 255, val.w * 255); } } template<class T, class TEX> static void resize_for_hog(const PtrStepSzb& src, PtrStepSzb dst, TEX& tex) { tex.filterMode = hipFilterModeLinear; size_t texOfs = 0; int colOfs = 0; hipChannelFormatDesc desc = hipCreateChannelDesc<T>(); cudaSafeCall( hipBindTexture2D(&texOfs, tex, src.data, desc, src.cols, src.rows, src.step) ); if (texOfs != 0) { colOfs = static_cast<int>( texOfs/sizeof(T) ); cudaSafeCall( hipUnbindTexture(tex) ); cudaSafeCall( hipBindTexture2D(&texOfs, tex, src.data, desc, src.cols, src.rows, src.step) ); } dim3 threads(32, 8); dim3 grid(divUp(dst.cols, threads.x), divUp(dst.rows, threads.y)); float sx = static_cast<float>(src.cols) / dst.cols; float sy = static_cast<float>(src.rows) / dst.rows; hipLaunchKernelGGL(( resize_for_hog_kernel), dim3(grid), dim3(threads), 0, 0, sx, sy, 
(PtrStepSz<T>)dst, colOfs); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); cudaSafeCall( hipUnbindTexture(tex) ); } void resize_8UC1(const PtrStepSzb& src, PtrStepSzb dst) { resize_for_hog<uchar> (src, dst, resize8UC1_tex); } void resize_8UC4(const PtrStepSzb& src, PtrStepSzb dst) { resize_for_hog<uchar4>(src, dst, resize8UC4_tex); } } // namespace hog }}} // namespace cv { namespace cuda { namespace cudev #endif /* CUDA_DISABLER */
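// A minimal, self-contained sketch (not part of the OpenCV sources above or below) illustrating
// the one systematic difference between this .hip file and the original .cu file that follows:
// hipify rewrites every CUDA triple-chevron launch as a hipLaunchKernelGGL call carrying the same
// grid, block, shared-memory and stream arguments. The kernel name scale_inplace and the launch
// geometry are illustrative assumptions, not identifiers taken from the files above.
#include <hip/hip_runtime.h>

__global__ void scale_inplace(float* data, int n, float s)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= s;   // element-wise scaling, enough to show the launch syntax
}

static void launch_scale(float* d_data, int n, float s, hipStream_t stream)
{
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA form (as in the .cu counterpart below):
    //     scale_inplace<<<grid, block, 0, stream>>>(d_data, n, s);
    // HIP form (as emitted by hipify and used throughout the file above):
    hipLaunchKernelGGL(scale_inplace, grid, block, 0, stream, d_data, n, s);
}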
b5066d047a364ccbc8662a96d3f7113fbee5386e.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/reduce.hpp" #include "opencv2/core/cuda/functional.hpp" #include "opencv2/core/cuda/warp_shuffle.hpp" namespace cv { namespace cuda { namespace device { namespace hog { __constant__ int cnbins; __constant__ int cblock_stride_x; __constant__ int cblock_stride_y; __constant__ int cnblocks_win_x; __constant__ int cnblocks_win_y; __constant__ int cncells_block_x; __constant__ int cncells_block_y; __constant__ int cblock_hist_size; __constant__ int cblock_hist_size_2up; __constant__ int cdescr_size; __constant__ int cdescr_width; /* Returns the nearest upper power of two, works only for the typical GPU thread count (pert block) values */ int power_2up(unsigned int n) { if (n <= 1) return 1; else if (n <= 2) return 2; else if (n <= 4) return 4; else if (n <= 8) return 8; else if (n <= 16) return 16; else if (n <= 32) return 32; else if (n <= 64) return 64; else if (n <= 128) return 128; else if (n <= 256) return 256; else if (n <= 512) return 512; else if (n <= 1024) return 1024; return -1; // Input is too big } /* Returns the max size for nblocks */ int max_nblocks(int nthreads, int ncells_block = 1) { int threads = nthreads * ncells_block; if(threads * 4 <= 256) return 4; else if(threads * 3 <= 256) return 3; else if(threads * 2 <= 256) return 2; else return 1; } void set_up_constants(int nbins, int block_stride_x, int block_stride_y, int nblocks_win_x, int nblocks_win_y, int ncells_block_x, int ncells_block_y, const cudaStream_t& stream) { cudaSafeCall(cudaMemcpyToSymbolAsync(cnbins, &nbins, sizeof(nbins), 0, cudaMemcpyHostToDevice, stream)); cudaSafeCall(cudaMemcpyToSymbolAsync(cblock_stride_x, &block_stride_x, sizeof(block_stride_x), 0, cudaMemcpyHostToDevice, stream)); cudaSafeCall(cudaMemcpyToSymbolAsync(cblock_stride_y, &block_stride_y, sizeof(block_stride_y), 0, cudaMemcpyHostToDevice, stream)); cudaSafeCall(cudaMemcpyToSymbolAsync(cnblocks_win_x, &nblocks_win_x, sizeof(nblocks_win_x), 0, cudaMemcpyHostToDevice, stream)); cudaSafeCall(cudaMemcpyToSymbolAsync(cnblocks_win_y, &nblocks_win_y, sizeof(nblocks_win_y), 0, cudaMemcpyHostToDevice, stream)); cudaSafeCall(cudaMemcpyToSymbolAsync(cncells_block_x, &ncells_block_x, sizeof(ncells_block_x), 0, cudaMemcpyHostToDevice, stream)); cudaSafeCall(cudaMemcpyToSymbolAsync(cncells_block_y, &ncells_block_y, sizeof(ncells_block_y), 0, cudaMemcpyHostToDevice, stream)); int block_hist_size = nbins * ncells_block_x * ncells_block_y; cudaSafeCall(cudaMemcpyToSymbolAsync(cblock_hist_size, &block_hist_size, sizeof(block_hist_size), 0, cudaMemcpyHostToDevice, stream)); int block_hist_size_2up = power_2up(block_hist_size); cudaSafeCall(cudaMemcpyToSymbolAsync(cblock_hist_size_2up, &block_hist_size_2up, sizeof(block_hist_size_2up), 0, cudaMemcpyHostToDevice, stream)); int descr_width = nblocks_win_x * block_hist_size; cudaSafeCall(cudaMemcpyToSymbolAsync(cdescr_width, &descr_width, sizeof(descr_width), 0, cudaMemcpyHostToDevice, stream)); int descr_size = descr_width * nblocks_win_y; cudaSafeCall(cudaMemcpyToSymbolAsync(cdescr_size, &descr_size, sizeof(descr_size), 0, cudaMemcpyHostToDevice, stream)); } //---------------------------------------------------------------------------- // Histogram computation // // CUDA kernel to compute the histograms template <int nblocks> // Number of histogram blocks processed by single GPU thread block __global__ void compute_hists_kernel_many_blocks(const int img_block_width, const PtrStepf grad, const PtrStepb qangle, 
float scale, float* block_hists, int cell_size, int patch_size, int block_patch_size, int threads_cell, int threads_block, int half_cell_size) { const int block_x = threadIdx.z; const int cell_x = threadIdx.x / threads_cell; const int cell_y = threadIdx.y; const int cell_thread_x = threadIdx.x & (threads_cell - 1); if (blockIdx.x * blockDim.z + block_x >= img_block_width) return; extern __shared__ float smem[]; float* hists = smem; float* final_hist = smem + cnbins * block_patch_size * nblocks; // patch_size means that patch_size pixels affect on block's cell if (cell_thread_x < patch_size) { const int offset_x = (blockIdx.x * blockDim.z + block_x) * cblock_stride_x + half_cell_size * cell_x + cell_thread_x; const int offset_y = blockIdx.y * cblock_stride_y + half_cell_size * cell_y; const float* grad_ptr = grad.ptr(offset_y) + offset_x * 2; const unsigned char* qangle_ptr = qangle.ptr(offset_y) + offset_x * 2; float* hist = hists + patch_size * (cell_y * blockDim.z * cncells_block_y + cell_x + block_x * cncells_block_x) + cell_thread_x; for (int bin_id = 0; bin_id < cnbins; ++bin_id) hist[bin_id * block_patch_size * nblocks] = 0.f; //(dist_x, dist_y) : distance between current pixel in patch and cell's center const int dist_x = -half_cell_size + (int)cell_thread_x - half_cell_size * cell_x; const int dist_y_begin = -half_cell_size - half_cell_size * (int)threadIdx.y; for (int dist_y = dist_y_begin; dist_y < dist_y_begin + patch_size; ++dist_y) { float2 vote = *(const float2*)grad_ptr; uchar2 bin = *(const uchar2*)qangle_ptr; grad_ptr += grad.step/sizeof(float); qangle_ptr += qangle.step; //(dist_center_x, dist_center_y) : distance between current pixel in patch and block's center int dist_center_y = dist_y - half_cell_size * (1 - 2 * cell_y); int dist_center_x = dist_x - half_cell_size * (1 - 2 * cell_x); float gaussian = ::expf(-(dist_center_y * dist_center_y + dist_center_x * dist_center_x) * scale); float interp_weight = ((float)cell_size - ::fabs(dist_y + 0.5f)) * ((float)cell_size - ::fabs(dist_x + 0.5f)) / (float)threads_block; hist[bin.x * block_patch_size * nblocks] += gaussian * interp_weight * vote.x; hist[bin.y * block_patch_size * nblocks] += gaussian * interp_weight * vote.y; } //reduction of the histograms volatile float* hist_ = hist; for (int bin_id = 0; bin_id < cnbins; ++bin_id, hist_ += block_patch_size * nblocks) { if (cell_thread_x < patch_size/2) hist_[0] += hist_[patch_size/2]; if (cell_thread_x < patch_size/4 && (!((patch_size/4) < 3 && cell_thread_x == 0))) hist_[0] += hist_[patch_size/4]; if (cell_thread_x == 0) final_hist[((cell_x + block_x * cncells_block_x) * cncells_block_y + cell_y) * cnbins + bin_id] = hist_[0] + hist_[1] + hist_[2]; } } __syncthreads(); float* block_hist = block_hists + (blockIdx.y * img_block_width + blockIdx.x * blockDim.z + block_x) * cblock_hist_size; //copying from final_hist to block_hist int tid; if(threads_cell < cnbins) { tid = (cell_y * cncells_block_y + cell_x) * cnbins + cell_thread_x; } else { tid = (cell_y * cncells_block_y + cell_x) * threads_cell + cell_thread_x; } if (tid < cblock_hist_size) { block_hist[tid] = final_hist[block_x * cblock_hist_size + tid]; if(threads_cell < cnbins && cell_thread_x == (threads_cell-1)) { for(int i=1;i<=(cnbins - threads_cell);++i) { block_hist[tid + i] = final_hist[block_x * cblock_hist_size + tid + i]; } } } } //declaration of variables and invoke the kernel with the calculated number of blocks void compute_hists(int nbins, int block_stride_x, int block_stride_y, int height, int width, 
const PtrStepSzf& grad, const PtrStepSzb& qangle, float sigma, float* block_hists, int cell_size_x, int cell_size_y, int ncells_block_x, int ncells_block_y, const cudaStream_t& stream) { const int ncells_block = ncells_block_x * ncells_block_y; const int patch_side = cell_size_x / 4; const int patch_size = cell_size_x + (patch_side * 2); const int block_patch_size = ncells_block * patch_size; const int threads_cell = power_2up(patch_size); const int threads_block = ncells_block * threads_cell; const int half_cell_size = cell_size_x / 2; int img_block_width = (width - ncells_block_x * cell_size_x + block_stride_x) / block_stride_x; int img_block_height = (height - ncells_block_y * cell_size_y + block_stride_y) / block_stride_y; const int nblocks = max_nblocks(threads_cell, ncells_block); dim3 grid(divUp(img_block_width, nblocks), img_block_height); dim3 threads(threads_cell * ncells_block_x, ncells_block_y, nblocks); // Precompute gaussian spatial window parameter float scale = 1.f / (2.f * sigma * sigma); int hists_size = (nbins * ncells_block * patch_size * nblocks) * sizeof(float); int final_hists_size = (nbins * ncells_block * nblocks) * sizeof(float); int smem = hists_size + final_hists_size; if (nblocks == 4) compute_hists_kernel_many_blocks<4><<<grid, threads, smem, stream>>>(img_block_width, grad, qangle, scale, block_hists, cell_size_x, patch_size, block_patch_size, threads_cell, threads_block, half_cell_size); else if (nblocks == 3) compute_hists_kernel_many_blocks<3><<<grid, threads, smem, stream>>>(img_block_width, grad, qangle, scale, block_hists, cell_size_x, patch_size, block_patch_size, threads_cell, threads_block, half_cell_size); else if (nblocks == 2) compute_hists_kernel_many_blocks<2><<<grid, threads, smem, stream>>>(img_block_width, grad, qangle, scale, block_hists, cell_size_x, patch_size, block_patch_size, threads_cell, threads_block, half_cell_size); else compute_hists_kernel_many_blocks<1><<<grid, threads, smem, stream>>>(img_block_width, grad, qangle, scale, block_hists, cell_size_x, patch_size, block_patch_size, threads_cell, threads_block, half_cell_size); cudaSafeCall( cudaGetLastError() ); } //------------------------------------------------------------- // Normalization of histograms via L2Hys_norm // template<int size> __device__ float reduce_smem(float* smem, float val) { unsigned int tid = threadIdx.x; float sum = val; reduce<size>(smem, sum, tid, plus<float>()); if (size == 32) { #if __CUDA_ARCH__ >= 300 return shfl(sum, 0); #else return smem[0]; #endif } else { #if __CUDA_ARCH__ >= 300 if (threadIdx.x == 0) smem[0] = sum; #endif __syncthreads(); return smem[0]; } } template <int nthreads, // Number of threads which process one block historgam int nblocks> // Number of block hisograms processed by one GPU thread block __global__ void normalize_hists_kernel_many_blocks(const int block_hist_size, const int img_block_width, float* block_hists, float threshold) { if (blockIdx.x * blockDim.z + threadIdx.z >= img_block_width) return; float* hist = block_hists + (blockIdx.y * img_block_width + blockIdx.x * blockDim.z + threadIdx.z) * block_hist_size + threadIdx.x; __shared__ float sh_squares[nthreads * nblocks]; float* squares = sh_squares + threadIdx.z * nthreads; float elem = 0.f; if (threadIdx.x < block_hist_size) elem = hist[0]; __syncthreads(); // prevent race condition (redundant?) 
float sum = reduce_smem<nthreads>(squares, elem * elem); float scale = 1.0f / (::sqrtf(sum) + 0.1f * block_hist_size); elem = ::min(elem * scale, threshold); __syncthreads(); // prevent race condition sum = reduce_smem<nthreads>(squares, elem * elem); scale = 1.0f / (::sqrtf(sum) + 1e-3f); if (threadIdx.x < block_hist_size) hist[0] = elem * scale; } void normalize_hists(int nbins, int block_stride_x, int block_stride_y, int height, int width, float* block_hists, float threshold, int cell_size_x, int cell_size_y, int ncells_block_x, int ncells_block_y, const cudaStream_t& stream) { const int nblocks = 1; int block_hist_size = nbins * ncells_block_x * ncells_block_y; int nthreads = power_2up(block_hist_size); dim3 threads(nthreads, 1, nblocks); int img_block_width = (width - ncells_block_x * cell_size_x + block_stride_x) / block_stride_x; int img_block_height = (height - ncells_block_y * cell_size_y + block_stride_y) / block_stride_y; dim3 grid(divUp(img_block_width, nblocks), img_block_height); if (nthreads == 32) normalize_hists_kernel_many_blocks<32, nblocks><<<grid, threads, 0, stream>>>(block_hist_size, img_block_width, block_hists, threshold); else if (nthreads == 64) normalize_hists_kernel_many_blocks<64, nblocks><<<grid, threads, 0, stream>>>(block_hist_size, img_block_width, block_hists, threshold); else if (nthreads == 128) normalize_hists_kernel_many_blocks<128, nblocks><<<grid, threads, 0, stream>>>(block_hist_size, img_block_width, block_hists, threshold); else if (nthreads == 256) normalize_hists_kernel_many_blocks<256, nblocks><<<grid, threads, 0, stream>>>(block_hist_size, img_block_width, block_hists, threshold); else if (nthreads == 512) normalize_hists_kernel_many_blocks<512, nblocks><<<grid, threads, 0, stream>>>(block_hist_size, img_block_width, block_hists, threshold); else CV_Error(cv::Error::StsBadArg, "normalize_hists: histogram's size is too big, try to decrease number of bins"); cudaSafeCall( cudaGetLastError() ); } //--------------------------------------------------------------------- // Linear SVM based classification // // return confidence values not just positive location template <int nthreads, // Number of threads per one histogram block int nblocks> // Number of histogram block processed by single GPU thread block __global__ void compute_confidence_hists_kernel_many_blocks(const int img_win_width, const int img_block_width, const int win_block_stride_x, const int win_block_stride_y, const float* block_hists, const float* coefs, float free_coef, float threshold, float* confidences) { const int win_x = threadIdx.z; if (blockIdx.x * blockDim.z + win_x >= img_win_width) return; const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width + blockIdx.x * win_block_stride_x * blockDim.z + win_x) * cblock_hist_size; float product = 0.f; for (int i = threadIdx.x; i < cdescr_size; i += nthreads) { int offset_y = i / cdescr_width; int offset_x = i - offset_y * cdescr_width; product += coefs[i] * hist[offset_y * img_block_width * cblock_hist_size + offset_x]; } __shared__ float products[nthreads * nblocks]; const int tid = threadIdx.z * nthreads + threadIdx.x; reduce<nthreads>(products, product, tid, plus<float>()); if (threadIdx.x == 0) confidences[blockIdx.y * img_win_width + blockIdx.x * blockDim.z + win_x] = product + free_coef; } void compute_confidence_hists(int win_height, int win_width, int block_stride_y, int block_stride_x, int win_stride_y, int win_stride_x, int height, int width, float* block_hists, float* coefs, float free_coef, 
float threshold, int cell_size_x, int ncells_block_x, float *confidences) { const int nthreads = 256; const int nblocks = 1; int win_block_stride_x = win_stride_x / block_stride_x; int win_block_stride_y = win_stride_y / block_stride_y; int img_win_width = (width - win_width + win_stride_x) / win_stride_x; int img_win_height = (height - win_height + win_stride_y) / win_stride_y; dim3 threads(nthreads, 1, nblocks); dim3 grid(divUp(img_win_width, nblocks), img_win_height); cudaSafeCall(cudaFuncSetCacheConfig(compute_confidence_hists_kernel_many_blocks<nthreads, nblocks>, cudaFuncCachePreferL1)); int img_block_width = (width - ncells_block_x * cell_size_x + block_stride_x) / block_stride_x; compute_confidence_hists_kernel_many_blocks<nthreads, nblocks><<<grid, threads>>>( img_win_width, img_block_width, win_block_stride_x, win_block_stride_y, block_hists, coefs, free_coef, threshold, confidences); cudaSafeCall(cudaThreadSynchronize()); } template <int nthreads, // Number of threads per one histogram block int nblocks> // Number of histogram block processed by single GPU thread block __global__ void classify_hists_kernel_many_blocks(const int img_win_width, const int img_block_width, const int win_block_stride_x, const int win_block_stride_y, const float* block_hists, const float* coefs, float free_coef, float threshold, unsigned char* labels) { const int win_x = threadIdx.z; if (blockIdx.x * blockDim.z + win_x >= img_win_width) return; const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width + blockIdx.x * win_block_stride_x * blockDim.z + win_x) * cblock_hist_size; float product = 0.f; for (int i = threadIdx.x; i < cdescr_size; i += nthreads) { int offset_y = i / cdescr_width; int offset_x = i - offset_y * cdescr_width; product += coefs[i] * hist[offset_y * img_block_width * cblock_hist_size + offset_x]; } __shared__ float products[nthreads * nblocks]; const int tid = threadIdx.z * nthreads + threadIdx.x; reduce<nthreads>(products, product, tid, plus<float>()); if (threadIdx.x == 0) labels[blockIdx.y * img_win_width + blockIdx.x * blockDim.z + win_x] = (product + free_coef >= threshold); } void classify_hists(int win_height, int win_width, int block_stride_y, int block_stride_x, int win_stride_y, int win_stride_x, int height, int width, float* block_hists, float* coefs, float free_coef, float threshold, int cell_size_x, int ncells_block_x, unsigned char* labels) { const int nthreads = 256; const int nblocks = 1; int win_block_stride_x = win_stride_x / block_stride_x; int win_block_stride_y = win_stride_y / block_stride_y; int img_win_width = (width - win_width + win_stride_x) / win_stride_x; int img_win_height = (height - win_height + win_stride_y) / win_stride_y; dim3 threads(nthreads, 1, nblocks); dim3 grid(divUp(img_win_width, nblocks), img_win_height); cudaSafeCall(cudaFuncSetCacheConfig(classify_hists_kernel_many_blocks<nthreads, nblocks>, cudaFuncCachePreferL1)); int img_block_width = (width - ncells_block_x * cell_size_x + block_stride_x) / block_stride_x; classify_hists_kernel_many_blocks<nthreads, nblocks><<<grid, threads>>>( img_win_width, img_block_width, win_block_stride_x, win_block_stride_y, block_hists, coefs, free_coef, threshold, labels); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //---------------------------------------------------------------------------- // Extract descriptors template <int nthreads> __global__ void extract_descrs_by_rows_kernel(const int img_block_width, const int win_block_stride_x, const 
int win_block_stride_y, const float* block_hists, PtrStepf descriptors) { // Get left top corner of the window in src const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width + blockIdx.x * win_block_stride_x) * cblock_hist_size; // Get left top corner of the window in dst float* descriptor = descriptors.ptr(blockIdx.y * gridDim.x + blockIdx.x); // Copy elements from src to dst for (int i = threadIdx.x; i < cdescr_size; i += nthreads) { int offset_y = i / cdescr_width; int offset_x = i - offset_y * cdescr_width; descriptor[i] = hist[offset_y * img_block_width * cblock_hist_size + offset_x]; } } void extract_descrs_by_rows(int win_height, int win_width, int block_stride_y, int block_stride_x, int win_stride_y, int win_stride_x, int height, int width, float* block_hists, int cell_size_x, int ncells_block_x, PtrStepSzf descriptors, const cudaStream_t& stream) { const int nthreads = 256; int win_block_stride_x = win_stride_x / block_stride_x; int win_block_stride_y = win_stride_y / block_stride_y; int img_win_width = (width - win_width + win_stride_x) / win_stride_x; int img_win_height = (height - win_height + win_stride_y) / win_stride_y; dim3 threads(nthreads, 1); dim3 grid(img_win_width, img_win_height); int img_block_width = (width - ncells_block_x * cell_size_x + block_stride_x) / block_stride_x; extract_descrs_by_rows_kernel<nthreads><<<grid, threads, 0, stream>>>(img_block_width, win_block_stride_x, win_block_stride_y, block_hists, descriptors); cudaSafeCall( cudaGetLastError() ); } template <int nthreads> __global__ void extract_descrs_by_cols_kernel(const int img_block_width, const int win_block_stride_x, const int win_block_stride_y, const float* block_hists, PtrStepf descriptors) { // Get left top corner of the window in src const float* hist = block_hists + (blockIdx.y * win_block_stride_y * img_block_width + blockIdx.x * win_block_stride_x) * cblock_hist_size; // Get left top corner of the window in dst float* descriptor = descriptors.ptr(blockIdx.y * gridDim.x + blockIdx.x); // Copy elements from src to dst for (int i = threadIdx.x; i < cdescr_size; i += nthreads) { int block_idx = i / cblock_hist_size; int idx_in_block = i - block_idx * cblock_hist_size; int y = block_idx / cnblocks_win_x; int x = block_idx - y * cnblocks_win_x; descriptor[(x * cnblocks_win_y + y) * cblock_hist_size + idx_in_block] = hist[(y * img_block_width + x) * cblock_hist_size + idx_in_block]; } } void extract_descrs_by_cols(int win_height, int win_width, int block_stride_y, int block_stride_x, int win_stride_y, int win_stride_x, int height, int width, float* block_hists, int cell_size_x, int ncells_block_x, PtrStepSzf descriptors, const cudaStream_t& stream) { const int nthreads = 256; int win_block_stride_x = win_stride_x / block_stride_x; int win_block_stride_y = win_stride_y / block_stride_y; int img_win_width = (width - win_width + win_stride_x) / win_stride_x; int img_win_height = (height - win_height + win_stride_y) / win_stride_y; dim3 threads(nthreads, 1); dim3 grid(img_win_width, img_win_height); int img_block_width = (width - ncells_block_x * cell_size_x + block_stride_x) / block_stride_x; extract_descrs_by_cols_kernel<nthreads><<<grid, threads, 0, stream>>>(img_block_width, win_block_stride_x, win_block_stride_y, block_hists, descriptors); cudaSafeCall( cudaGetLastError() ); } //---------------------------------------------------------------------------- // Gradients computation template <int nthreads, int correct_gamma> __global__ void 
compute_gradients_8UC4_kernel(int height, int width, const PtrStepb img, float angle_scale, PtrStepf grad, PtrStepb qangle) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const uchar4* row = (const uchar4*)img.ptr(blockIdx.y); __shared__ float sh_row[(nthreads + 2) * 3]; uchar4 val; if (x < width) val = row[x]; else val = row[width - 2]; sh_row[threadIdx.x + 1] = val.x; sh_row[threadIdx.x + 1 + (nthreads + 2)] = val.y; sh_row[threadIdx.x + 1 + 2 * (nthreads + 2)] = val.z; if (threadIdx.x == 0) { val = row[::max(x - 1, 1)]; sh_row[0] = val.x; sh_row[(nthreads + 2)] = val.y; sh_row[2 * (nthreads + 2)] = val.z; } if (threadIdx.x == blockDim.x - 1) { val = row[::min(x + 1, width - 2)]; sh_row[blockDim.x + 1] = val.x; sh_row[blockDim.x + 1 + (nthreads + 2)] = val.y; sh_row[blockDim.x + 1 + 2 * (nthreads + 2)] = val.z; } __syncthreads(); if (x < width) { float3 a, b; b.x = sh_row[threadIdx.x + 2]; b.y = sh_row[threadIdx.x + 2 + (nthreads + 2)]; b.z = sh_row[threadIdx.x + 2 + 2 * (nthreads + 2)]; a.x = sh_row[threadIdx.x]; a.y = sh_row[threadIdx.x + (nthreads + 2)]; a.z = sh_row[threadIdx.x + 2 * (nthreads + 2)]; float3 dx; if (correct_gamma) dx = make_float3(::sqrtf(b.x) - ::sqrtf(a.x), ::sqrtf(b.y) - ::sqrtf(a.y), ::sqrtf(b.z) - ::sqrtf(a.z)); else dx = make_float3(b.x - a.x, b.y - a.y, b.z - a.z); float3 dy = make_float3(0.f, 0.f, 0.f); if (blockIdx.y > 0 && blockIdx.y < height - 1) { val = ((const uchar4*)img.ptr(blockIdx.y - 1))[x]; a = make_float3(val.x, val.y, val.z); val = ((const uchar4*)img.ptr(blockIdx.y + 1))[x]; b = make_float3(val.x, val.y, val.z); if (correct_gamma) dy = make_float3(::sqrtf(b.x) - ::sqrtf(a.x), ::sqrtf(b.y) - ::sqrtf(a.y), ::sqrtf(b.z) - ::sqrtf(a.z)); else dy = make_float3(b.x - a.x, b.y - a.y, b.z - a.z); } float best_dx = dx.x; float best_dy = dy.x; float mag0 = dx.x * dx.x + dy.x * dy.x; float mag1 = dx.y * dx.y + dy.y * dy.y; if (mag0 < mag1) { best_dx = dx.y; best_dy = dy.y; mag0 = mag1; } mag1 = dx.z * dx.z + dy.z * dy.z; if (mag0 < mag1) { best_dx = dx.z; best_dy = dy.z; mag0 = mag1; } mag0 = ::sqrtf(mag0); float ang = (::atan2f(best_dy, best_dx) + CV_PI_F) * angle_scale - 0.5f; int hidx = (int)::floorf(ang); ang -= hidx; hidx = (hidx + cnbins) % cnbins; ((uchar2*)qangle.ptr(blockIdx.y))[x] = make_uchar2(hidx, (hidx + 1) % cnbins); ((float2*)grad.ptr(blockIdx.y))[x] = make_float2(mag0 * (1.f - ang), mag0 * ang); } } void compute_gradients_8UC4(int nbins, int height, int width, const PtrStepSzb& img, float angle_scale, PtrStepSzf grad, PtrStepSzb qangle, bool correct_gamma, const cudaStream_t& stream) { CV_UNUSED(nbins); const int nthreads = 256; dim3 bdim(nthreads, 1); dim3 gdim(divUp(width, bdim.x), divUp(height, bdim.y)); if (correct_gamma) compute_gradients_8UC4_kernel<nthreads, 1><<<gdim, bdim, 0, stream>>>(height, width, img, angle_scale, grad, qangle); else compute_gradients_8UC4_kernel<nthreads, 0><<<gdim, bdim, 0, stream>>>(height, width, img, angle_scale, grad, qangle); cudaSafeCall( cudaGetLastError() ); } template <int nthreads, int correct_gamma> __global__ void compute_gradients_8UC1_kernel(int height, int width, const PtrStepb img, float angle_scale, PtrStepf grad, PtrStepb qangle) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const unsigned char* row = (const unsigned char*)img.ptr(blockIdx.y); __shared__ float sh_row[nthreads + 2]; if (x < width) sh_row[threadIdx.x + 1] = row[x]; else sh_row[threadIdx.x + 1] = row[width - 2]; if (threadIdx.x == 0) sh_row[0] = row[::max(x - 1, 1)]; if (threadIdx.x == blockDim.x - 1) 
sh_row[blockDim.x + 1] = row[::min(x + 1, width - 2)]; __syncthreads(); if (x < width) { float dx; if (correct_gamma) dx = ::sqrtf(sh_row[threadIdx.x + 2]) - ::sqrtf(sh_row[threadIdx.x]); else dx = sh_row[threadIdx.x + 2] - sh_row[threadIdx.x]; float dy = 0.f; if (blockIdx.y > 0 && blockIdx.y < height - 1) { float a = ((const unsigned char*)img.ptr(blockIdx.y + 1))[x]; float b = ((const unsigned char*)img.ptr(blockIdx.y - 1))[x]; if (correct_gamma) dy = ::sqrtf(a) - ::sqrtf(b); else dy = a - b; } float mag = ::sqrtf(dx * dx + dy * dy); float ang = (::atan2f(dy, dx) + CV_PI_F) * angle_scale - 0.5f; int hidx = (int)::floorf(ang); ang -= hidx; hidx = (hidx + cnbins) % cnbins; ((uchar2*)qangle.ptr(blockIdx.y))[x] = make_uchar2(hidx, (hidx + 1) % cnbins); ((float2*) grad.ptr(blockIdx.y))[x] = make_float2(mag * (1.f - ang), mag * ang); } } void compute_gradients_8UC1(int nbins, int height, int width, const PtrStepSzb& img, float angle_scale, PtrStepSzf grad, PtrStepSzb qangle, bool correct_gamma, const cudaStream_t& stream) { CV_UNUSED(nbins); const int nthreads = 256; dim3 bdim(nthreads, 1); dim3 gdim(divUp(width, bdim.x), divUp(height, bdim.y)); if (correct_gamma) compute_gradients_8UC1_kernel<nthreads, 1><<<gdim, bdim, 0, stream>>>(height, width, img, angle_scale, grad, qangle); else compute_gradients_8UC1_kernel<nthreads, 0><<<gdim, bdim, 0, stream>>>(height, width, img, angle_scale, grad, qangle); cudaSafeCall( cudaGetLastError() ); } //------------------------------------------------------------------- // Resize texture<uchar4, 2, cudaReadModeNormalizedFloat> resize8UC4_tex; texture<uchar, 2, cudaReadModeNormalizedFloat> resize8UC1_tex; __global__ void resize_for_hog_kernel(float sx, float sy, PtrStepSz<uchar> dst, int colOfs) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) dst.ptr(y)[x] = tex2D(resize8UC1_tex, x * sx + colOfs, y * sy) * 255; } __global__ void resize_for_hog_kernel(float sx, float sy, PtrStepSz<uchar4> dst, int colOfs) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < dst.cols && y < dst.rows) { float4 val = tex2D(resize8UC4_tex, x * sx + colOfs, y * sy); dst.ptr(y)[x] = make_uchar4(val.x * 255, val.y * 255, val.z * 255, val.w * 255); } } template<class T, class TEX> static void resize_for_hog(const PtrStepSzb& src, PtrStepSzb dst, TEX& tex) { tex.filterMode = cudaFilterModeLinear; size_t texOfs = 0; int colOfs = 0; cudaChannelFormatDesc desc = cudaCreateChannelDesc<T>(); cudaSafeCall( cudaBindTexture2D(&texOfs, tex, src.data, desc, src.cols, src.rows, src.step) ); if (texOfs != 0) { colOfs = static_cast<int>( texOfs/sizeof(T) ); cudaSafeCall( cudaUnbindTexture(tex) ); cudaSafeCall( cudaBindTexture2D(&texOfs, tex, src.data, desc, src.cols, src.rows, src.step) ); } dim3 threads(32, 8); dim3 grid(divUp(dst.cols, threads.x), divUp(dst.rows, threads.y)); float sx = static_cast<float>(src.cols) / dst.cols; float sy = static_cast<float>(src.rows) / dst.rows; resize_for_hog_kernel<<<grid, threads>>>(sx, sy, (PtrStepSz<T>)dst, colOfs); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); cudaSafeCall( cudaUnbindTexture(tex) ); } void resize_8UC1(const PtrStepSzb& src, PtrStepSzb dst) { resize_for_hog<uchar> (src, dst, resize8UC1_tex); } void resize_8UC4(const PtrStepSzb& src, PtrStepSzb dst) { resize_for_hog<uchar4>(src, dst, resize8UC4_tex); } } // namespace hog }}} // namespace cv { 
namespace cuda { namespace cudev #endif /* CUDA_DISABLER */
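// A host-side reference sketch (not part of the OpenCV library) of the L2-Hys step that
// normalize_hists_kernel_many_blocks applies to each block histogram above: L2-normalize,
// clip every bin at `threshold`, then L2-normalize again. The epsilon terms mirror the
// kernel (0.1f * histogram size in the first pass, 1e-3f in the second).
#include <algorithm>
#include <cmath>
#include <vector>

static void l2hys_normalize(std::vector<float>& hist, float threshold)
{
    float sum = 0.f;
    for (float v : hist) sum += v * v;
    float scale = 1.f / (std::sqrt(sum) + 0.1f * static_cast<float>(hist.size()));

    sum = 0.f;
    for (float& v : hist)
    {
        v = std::min(v * scale, threshold);   // clip after the first normalization
        sum += v * v;
    }

    scale = 1.f / (std::sqrt(sum) + 1e-3f);   // renormalize the clipped histogram
    for (float& v : hist) v *= scale;
}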
d4ac41721e0f0544b342c11a2c962024e00925fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <caffepro/layers/eltwise_amax_layer.h> #include <caffepro/utils/utils.h> namespace caffepro { eltwise_amax_layer::eltwise_amax_layer(caffepro_context *context, const LayerParameter &param) : caffepro_layer(context, param) { attr_.num_inputs_min = 2; attr_.num_inputs_max = 2; attr_.num_outputs_min = attr_.num_outputs_max = 1; attr_.set_constraint( layer_attribute::CF_REQUIRE_SAME_DIMTYPE_ACROSS_DEVICES | layer_attribute::CF_REQUIRE_SAME_DEVICE | layer_attribute::CF_REQUIRE_SAME_DIMTYPE | layer_attribute::CF_REQUIRE_SAME_COUNT | layer_attribute::CF_REQUIRE_SAME_NUM ); } eltwise_amax_layer::~eltwise_amax_layer() { release_all(); } void eltwise_amax_layer::init() { check_input(); } __global__ static void eltamax_forward(int n, const data_type *input1, const data_type *input2, data_type *output) { CUDA_KERNEL_LOOP(index, n) { if (fabsf(input1[index]) >= fabsf(input2[index])) { output[index] = input1[index]; } else { output[index] = input2[index]; } } } void eltwise_amax_layer::on_forward(int device_index) { auto &input1 = *inputs_[0]->get(device_index); auto &input2 = *inputs_[1]->get(device_index); auto &output = *outputs_[0]->get(device_index); KERNEL_CALL(eltamax_forward, input1.count())(input1.count(), input1.gpu_data(), input2.gpu_data(), output.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; } __global__ static void eltamax_backward(int n, const data_type *input1_data, const data_type *input2_data, const data_type *diff, data_type *input1_diff, data_type *input2_diff, const data_type scale_target1, const data_type scale_target2, bool bp1, bool bp2) { CUDA_KERNEL_LOOP(index, n) { if (fabsf(input1_data[index]) >= fabsf(input2_data[index])) { if (bp1) { if (scale_target1 == 0) { input1_diff[index] = diff[index]; } else { input1_diff[index] = diff[index] + input1_diff[index] * scale_target1; } } if (bp2) { if (scale_target2 == 0) { input2_diff[index] = 0; } else { input2_diff[index] = input2_diff[index] * scale_target2; } } } else { if (bp1) { if (scale_target1 == 0) { input1_diff[index] = 0; } else { input1_diff[index] = input1_diff[index] * scale_target1; } } if (bp2) { if (scale_target2 == 0) { input2_diff[index] = diff[index]; } else { input2_diff[index] = diff[index] + input2_diff[index] * scale_target2; } } } } } void eltwise_amax_layer::on_backward(int device_index, act_selector bp_acts, weight_selector bp_weights, act_selector clear_acts_diff, weight_selector clear_weights_diff) { if (should_bp(bp_acts, 0) || should_bp(bp_acts, 1)) { bool bp1 = should_bp(bp_acts, 0); bool bp2 = should_bp(bp_acts, 1); data_type beta1 = get_beta(clear_acts_diff, 0); data_type beta2 = get_beta(clear_acts_diff, 1); auto &input1 = *inputs_[0]->get(device_index); auto &input2 = *inputs_[1]->get(device_index); auto &output = *outputs_[0]->get(device_index); KERNEL_CALL(eltamax_backward, input1.count())(input1.count(), input1.gpu_data(), input2.gpu_data(), output.gpu_diff(), input1.mutable_gpu_diff(), input2.mutable_gpu_diff(), beta1, beta2, bp1, bp2); CUDA_POST_KERNEL_CHECK; } } }
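// A scalar reference (illustrative only, independent of the caffepro classes above) for the
// element-wise rule this layer implements. Forward returns whichever input has the larger
// absolute value, with ties going to the first input as in eltamax_forward; backward routes
// the output gradient to that same input and zeroes the other, ignoring the optional
// scale_target accumulation that eltamax_backward also handles.
#include <cmath>

struct amax_grad { float d_input1; float d_input2; };

inline float amax_forward_ref(float a, float b)
{
    return (std::fabs(a) >= std::fabs(b)) ? a : b;
}

inline amax_grad amax_backward_ref(float a, float b, float d_output)
{
    if (std::fabs(a) >= std::fabs(b))
        return { d_output, 0.f };   // gradient flows to input1
    return { 0.f, d_output };       // gradient flows to input2
}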
d4ac41721e0f0544b342c11a2c962024e00925fe.cu
#include <caffepro/layers/eltwise_amax_layer.h> #include <caffepro/utils/utils.h> namespace caffepro { eltwise_amax_layer::eltwise_amax_layer(caffepro_context *context, const LayerParameter &param) : caffepro_layer(context, param) { attr_.num_inputs_min = 2; attr_.num_inputs_max = 2; attr_.num_outputs_min = attr_.num_outputs_max = 1; attr_.set_constraint( layer_attribute::CF_REQUIRE_SAME_DIMTYPE_ACROSS_DEVICES | layer_attribute::CF_REQUIRE_SAME_DEVICE | layer_attribute::CF_REQUIRE_SAME_DIMTYPE | layer_attribute::CF_REQUIRE_SAME_COUNT | layer_attribute::CF_REQUIRE_SAME_NUM ); } eltwise_amax_layer::~eltwise_amax_layer() { release_all(); } void eltwise_amax_layer::init() { check_input(); } __global__ static void eltamax_forward(int n, const data_type *input1, const data_type *input2, data_type *output) { CUDA_KERNEL_LOOP(index, n) { if (fabsf(input1[index]) >= fabsf(input2[index])) { output[index] = input1[index]; } else { output[index] = input2[index]; } } } void eltwise_amax_layer::on_forward(int device_index) { auto &input1 = *inputs_[0]->get(device_index); auto &input2 = *inputs_[1]->get(device_index); auto &output = *outputs_[0]->get(device_index); KERNEL_CALL(eltamax_forward, input1.count())(input1.count(), input1.gpu_data(), input2.gpu_data(), output.mutable_gpu_data()); CUDA_POST_KERNEL_CHECK; } __global__ static void eltamax_backward(int n, const data_type *input1_data, const data_type *input2_data, const data_type *diff, data_type *input1_diff, data_type *input2_diff, const data_type scale_target1, const data_type scale_target2, bool bp1, bool bp2) { CUDA_KERNEL_LOOP(index, n) { if (fabsf(input1_data[index]) >= fabsf(input2_data[index])) { if (bp1) { if (scale_target1 == 0) { input1_diff[index] = diff[index]; } else { input1_diff[index] = diff[index] + input1_diff[index] * scale_target1; } } if (bp2) { if (scale_target2 == 0) { input2_diff[index] = 0; } else { input2_diff[index] = input2_diff[index] * scale_target2; } } } else { if (bp1) { if (scale_target1 == 0) { input1_diff[index] = 0; } else { input1_diff[index] = input1_diff[index] * scale_target1; } } if (bp2) { if (scale_target2 == 0) { input2_diff[index] = diff[index]; } else { input2_diff[index] = diff[index] + input2_diff[index] * scale_target2; } } } } } void eltwise_amax_layer::on_backward(int device_index, act_selector bp_acts, weight_selector bp_weights, act_selector clear_acts_diff, weight_selector clear_weights_diff) { if (should_bp(bp_acts, 0) || should_bp(bp_acts, 1)) { bool bp1 = should_bp(bp_acts, 0); bool bp2 = should_bp(bp_acts, 1); data_type beta1 = get_beta(clear_acts_diff, 0); data_type beta2 = get_beta(clear_acts_diff, 1); auto &input1 = *inputs_[0]->get(device_index); auto &input2 = *inputs_[1]->get(device_index); auto &output = *outputs_[0]->get(device_index); KERNEL_CALL(eltamax_backward, input1.count())(input1.count(), input1.gpu_data(), input2.gpu_data(), output.gpu_diff(), input1.mutable_gpu_diff(), input2.mutable_gpu_diff(), beta1, beta2, bp1, bp2); CUDA_POST_KERNEL_CHECK; } } }
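// Both versions of this layer iterate with CUDA_KERNEL_LOOP from caffepro/utils/utils.h.
// The macro below is an assumption modelled on the common Caffe-style definition (the actual
// caffepro header may differ in detail), shown only to make the iteration pattern explicit:
// a grid-stride loop, so one fixed launch configuration covers an arbitrary element count n.
#include <cuda_runtime.h>

#define GRID_STRIDE_LOOP(i, n) \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)

__global__ void fill_value(float* out, int n, float value)
{
    GRID_STRIDE_LOOP(index, n)      // each thread strides over the whole range
    {
        out[index] = value;
    }
}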
e90907fabc8cdefd324473266b69132fcd0e069b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <iomanip> #include <stdio.h> #include <chrono> #include <random> // For kernel generation #include <algorithm> #include <list> using namespace std; #pragma region Cuda const int WARP_SIZE = 32; // Crude benchmarks suggest 256 is better than 512 and 1024 // TODO: Autotune/use better heuristics, improve speed more. const int MAX_BLOCK_SIZE = 256; const int CUDA_NUM_THREADS = 1024; int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } static int getGradParamsNumThreads(int batchSize){ //warp per item in a batch, up to a maximum return ::min(batchSize * WARP_SIZE, MAX_BLOCK_SIZE); } __global__ void ConvolutionRowwise(const float *input, float *rowwiseResults, int batch_ix, int channel_ix, int input_dim, int result_dim ) { float* res1 = rowwiseResults + (blockIdx.x * result_dim); float* res2 = res1 + (input_dim * result_dim); float* res3 = res2 + (input_dim * result_dim); input = input + (blockIdx.x * input_dim); float l1 = input[0], l2 = input[1], l3 = input[2]; for (int i = 3; i < input_dim; ++i) { *res1 = (l1 + l2 + l3); ++res1; *res2 = (l1 - l2 + l3); ++res2; *res3 = (l1 + l2 - l3); ++res3; l1 = l2; l2 = l3; l3 = input[i]; } *res1 = (l1 + l2 + l3); *res2 = (l1 - l2 + l3); *res3 = (l1 + l2 - l3); } __global__ void ConvolutionColwise(const float *rowwiseResults, float *colwiseResults, int inputDim, int resultDim) { // blockDim // Z tells us which rowwiseResults matrix to work on {0,1,2} // X tells us the rowwiseResults matrix top-row // Y tells us the rowwiseResults matrix col int topCell = (blockIdx.z *inputDim*resultDim) + (blockIdx.x * resultDim) + blockIdx.y; float l1 = rowwiseResults[topCell]; float l2 = rowwiseResults[topCell + resultDim]; float l3 = rowwiseResults[topCell + resultDim + resultDim]; topCell = (blockIdx.z * resultDim * resultDim * 3) + (blockIdx.x * resultDim) + blockIdx.y; colwiseResults[topCell] = l1 + l2 + l3; topCell += resultDim * resultDim; colwiseResults[topCell] = l1 - l2 + l3; topCell += resultDim * resultDim; colwiseResults[topCell] = l1 + l2 - l3; } #pragma endregion #pragma region Misc std::default_random_engine randomGeneratorEngine; std::uniform_real_distribution<float> randomGenerator; float *CreateArray(int size) { int i; float *arr; hipMallocManaged(&arr, size); for (i = 0; i < size; ++i) { arr[i] = i + 1; cout << arr[i] << " "; //arr[i] = (int)(randomGenerator(randomGeneratorEngine) * 10); } cout << endl; return arr; } void PrintMat(float *mat, int rows, int cols) { for (int i = 0; i < rows; ++i) { cout << "["; for (int j = 0; j < cols - 1; ++j) { cout << mat[i*cols + j] << " "; } cout << mat[i*cols + (cols - 1)] << "]"; if (i < rows - 1) cout << endl; } cout << endl; } template <typename Function> void zip(const vector<int> &batchSizes, const vector<int> &inputChannels, const vector<int> &outputChannels, const vector<int> &inputDims, Function function) { for (int batchSize : batchSizes) for (int inputChannel : inputChannels) for (int outputChannel : outputChannels) for (int inputDim : inputDims) function(batchSize, inputChannel, outputChannel, inputDim); } #pragma endregion int main() { const vector<int> batchSizes = { 1 }; const vector<int> inputChannels = { 1 }; const vector<int> outputChannels = { 1 }; const vector<int> inputDims = { 10 }; // 16, 32, 64, 128, 256, 512, 650, 1024, 1280, 1500 std::cout << std::setfill('0') << std::setw(5) << std::fixed << 
std::setprecision(1); zip(batchSizes, inputChannels, outputChannels, inputDims, [](int batchIndex, int inputChannel, int outputChannel, int inputDim) { float *rowwiseResults; int resultDim = inputDim - 2; int rowwiseResultsSize = 3 * inputDim * resultDim; hipMallocManaged(&rowwiseResults, rowwiseResultsSize); float *arr = CreateArray(inputDim * inputDim); dim3 grid(inputDim); hipLaunchKernelGGL(( ConvolutionRowwise) , dim3(grid), dim3(1) , 0, 0, arr, rowwiseResults, batchIndex, inputChannel, inputDim, resultDim); hipDeviceSynchronize(); PrintMat(rowwiseResults, inputDim, resultDim); cout << endl; PrintMat(rowwiseResults + inputDim * resultDim, inputDim, resultDim); cout << endl; PrintMat(rowwiseResults + (2 * (inputDim * resultDim)), inputDim, resultDim); cout << endl; float *colResults; hipMallocManaged <float>(&colResults, 9 * resultDim * resultDim); grid = dim3(resultDim, resultDim, 3); hipLaunchKernelGGL(( ConvolutionColwise) , dim3(grid), dim3(1) , 0, 0, rowwiseResults, colResults, inputDim, resultDim); hipDeviceSynchronize(); for (int i = 0; i < 9; i++) { PrintMat(colResults + (i*resultDim*resultDim), inputDim, resultDim); cout << endl; } }); //for (auto& [a, b] : zip(batchSizes, inputChannels)) { return 0; }
e90907fabc8cdefd324473266b69132fcd0e069b.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <iomanip> #include <stdio.h> #include <chrono> #include <random> // For kernel generation #include <algorithm> #include <list> using namespace std; #pragma region Cuda const int WARP_SIZE = 32; // Crude benchmarks suggest 256 is better than 512 and 1024 // TODO: Autotune/use better heuristics, improve speed more. const int MAX_BLOCK_SIZE = 256; const int CUDA_NUM_THREADS = 1024; int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } static int getGradParamsNumThreads(int batchSize){ //warp per item in a batch, up to a maximum return std::min(batchSize * WARP_SIZE, MAX_BLOCK_SIZE); } __global__ void ConvolutionRowwise(const float *input, float *rowwiseResults, int batch_ix, int channel_ix, int input_dim, int result_dim ) { float* res1 = rowwiseResults + (blockIdx.x * result_dim); float* res2 = res1 + (input_dim * result_dim); float* res3 = res2 + (input_dim * result_dim); input = input + (blockIdx.x * input_dim); float l1 = input[0], l2 = input[1], l3 = input[2]; for (int i = 3; i < input_dim; ++i) { *res1 = (l1 + l2 + l3); ++res1; *res2 = (l1 - l2 + l3); ++res2; *res3 = (l1 + l2 - l3); ++res3; l1 = l2; l2 = l3; l3 = input[i]; } *res1 = (l1 + l2 + l3); *res2 = (l1 - l2 + l3); *res3 = (l1 + l2 - l3); } __global__ void ConvolutionColwise(const float *rowwiseResults, float *colwiseResults, int inputDim, int resultDim) { // blockDim // Z tells us which rowwiseResults matrix to work on {0,1,2} // X tells us the rowwiseResults matrix top-row // Y tells us the rowwiseResults matrix col int topCell = (blockIdx.z *inputDim*resultDim) + (blockIdx.x * resultDim) + blockIdx.y; float l1 = rowwiseResults[topCell]; float l2 = rowwiseResults[topCell + resultDim]; float l3 = rowwiseResults[topCell + resultDim + resultDim]; topCell = (blockIdx.z * resultDim * resultDim * 3) + (blockIdx.x * resultDim) + blockIdx.y; colwiseResults[topCell] = l1 + l2 + l3; topCell += resultDim * resultDim; colwiseResults[topCell] = l1 - l2 + l3; topCell += resultDim * resultDim; colwiseResults[topCell] = l1 + l2 - l3; } #pragma endregion #pragma region Misc std::default_random_engine randomGeneratorEngine; std::uniform_real_distribution<float> randomGenerator; float *CreateArray(int size) { int i; float *arr; cudaMallocManaged(&arr, size); for (i = 0; i < size; ++i) { arr[i] = i + 1; cout << arr[i] << " "; //arr[i] = (int)(randomGenerator(randomGeneratorEngine) * 10); } cout << endl; return arr; } void PrintMat(float *mat, int rows, int cols) { for (int i = 0; i < rows; ++i) { cout << "["; for (int j = 0; j < cols - 1; ++j) { cout << mat[i*cols + j] << " "; } cout << mat[i*cols + (cols - 1)] << "]"; if (i < rows - 1) cout << endl; } cout << endl; } template <typename Function> void zip(const vector<int> &batchSizes, const vector<int> &inputChannels, const vector<int> &outputChannels, const vector<int> &inputDims, Function function) { for (int batchSize : batchSizes) for (int inputChannel : inputChannels) for (int outputChannel : outputChannels) for (int inputDim : inputDims) function(batchSize, inputChannel, outputChannel, inputDim); } #pragma endregion int main() { const vector<int> batchSizes = { 1 }; const vector<int> inputChannels = { 1 }; const vector<int> outputChannels = { 1 }; const vector<int> inputDims = { 10 }; // 16, 32, 64, 128, 256, 512, 650, 1024, 1280, 1500 std::cout << std::setfill('0') << std::setw(5) << std::fixed << std::setprecision(1); zip(batchSizes, inputChannels, outputChannels, 
inputDims, [](int batchIndex, int inputChannel, int outputChannel, int inputDim) { float *rowwiseResults; int resultDim = inputDim - 2; int rowwiseResultsSize = 3 * inputDim * resultDim; cudaMallocManaged(&rowwiseResults, rowwiseResultsSize); float *arr = CreateArray(inputDim * inputDim); dim3 grid(inputDim); ConvolutionRowwise <<< grid, 1 >>> (arr, rowwiseResults, batchIndex, inputChannel, inputDim, resultDim); cudaDeviceSynchronize(); PrintMat(rowwiseResults, inputDim, resultDim); cout << endl; PrintMat(rowwiseResults + inputDim * resultDim, inputDim, resultDim); cout << endl; PrintMat(rowwiseResults + (2 * (inputDim * resultDim)), inputDim, resultDim); cout << endl; float *colResults; cudaMallocManaged <float>(&colResults, 9 * resultDim * resultDim); grid = dim3(resultDim, resultDim, 3); ConvolutionColwise <<< grid, 1 >>> (rowwiseResults, colResults, inputDim, resultDim); cudaDeviceSynchronize(); for (int i = 0; i < 9; i++) { PrintMat(colResults + (i*resultDim*resultDim), inputDim, resultDim); cout << endl; } }); //for (auto& [a, b] : zip(batchSizes, inputChannels)) { return 0; }
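One detail worth flagging in this pair: cudaMallocManaged (and its hipMallocManaged counterpart) takes a size in bytes, while the calls above pass raw element counts (inputDim*inputDim, 3*inputDim*resultDim, 9*resultDim*resultDim), which would under-allocate the buffers by a factor of sizeof(float). The host-side sketch below shows byte-sized managed allocations for the same three buffers; it assumes the element counts in the original are the intended ones and does not repeat the two convolution kernels.

#include <cuda_runtime.h>
#include <cstdio>

int main() {
    int inputDim = 10, resultDim = inputDim - 2;
    float *input = nullptr, *rowwise = nullptr, *colwise = nullptr;
    // sizes expressed in bytes; element counts match the buffers used by the row/column passes above
    cudaMallocManaged(&input,   size_t(inputDim) * inputDim * sizeof(float));
    cudaMallocManaged(&rowwise, size_t(3) * inputDim * resultDim * sizeof(float));
    cudaMallocManaged(&colwise, size_t(9) * resultDim * resultDim * sizeof(float));
    for (int i = 0; i < inputDim * inputDim; ++i) input[i] = float(i + 1);
    // ... launch ConvolutionRowwise / ConvolutionColwise from the file above here ...
    cudaDeviceSynchronize();
    printf("alloc status: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(input); cudaFree(rowwise); cudaFree(colwise);
    return 0;
}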
b5a6fe517e5120a6efebaf3428b3e0d7b01dc2a7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_update_halo_kernel3_plus_4_a [3][2]; static int dims_update_halo_kernel3_plus_4_a_h [3][2] = {0}; //user function __device__ inline void update_halo_kernel3_plus_4_a_gpu(ACC<double> &vol_flux_x, ACC<double> &mass_flux_x, const int* fields) { if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x(0,0,0) = vol_flux_x(0,4,0); if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x(0,0,0) = mass_flux_x(0,4,0); } __global__ void ops_update_halo_kernel3_plus_4_a( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_plus_4_a[0][0] + idx_z * 1*1 * dims_update_halo_kernel3_plus_4_a[0][0] * dims_update_halo_kernel3_plus_4_a[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_plus_4_a[1][0] + idx_z * 1*1 * dims_update_halo_kernel3_plus_4_a[1][0] * dims_update_halo_kernel3_plus_4_a[1][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel3_plus_4_a[0][0], dims_update_halo_kernel3_plus_4_a[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel3_plus_4_a[1][0], dims_update_halo_kernel3_plus_4_a[1][1], arg1); update_halo_kernel3_plus_4_a_gpu(argp0, argp1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel3_plus_4_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel3_plus_4_a_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,59)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(59,"update_halo_kernel3_plus_4_a"); OPS_kernels[59].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != dims_update_halo_kernel3_plus_4_a_h[0][0] || ydim0 != dims_update_halo_kernel3_plus_4_a_h[0][1] || xdim1 != dims_update_halo_kernel3_plus_4_a_h[1][0] || ydim1 != dims_update_halo_kernel3_plus_4_a_h[1][1]) { dims_update_halo_kernel3_plus_4_a_h[0][0] = xdim0; dims_update_halo_kernel3_plus_4_a_h[0][1] = ydim0; dims_update_halo_kernel3_plus_4_a_h[1][0] = xdim1; dims_update_halo_kernel3_plus_4_a_h[1][1] = ydim1; cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel3_plus_4_a, dims_update_halo_kernel3_plus_4_a_h, sizeof(dims_update_halo_kernel3_plus_4_a))); } int *arg2h = (int *)arg2.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 
grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[59].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel3_plus_4_a), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[59].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[59].mpi_time += t2-t1; OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel3_plus_4_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 59; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 59; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel3_plus_4_a_execute; if (OPS_diags > 1) { ops_timing_realloc(59,"update_halo_kernel3_plus_4_a"); } ops_enqueue_kernel(desc); } #endif
b5a6fe517e5120a6efebaf3428b3e0d7b01dc2a7.cu
// // auto-generated by ops.py // __constant__ int dims_update_halo_kernel3_plus_4_a [3][2]; static int dims_update_halo_kernel3_plus_4_a_h [3][2] = {0}; //user function __device__ inline void update_halo_kernel3_plus_4_a_gpu(ACC<double> &vol_flux_x, ACC<double> &mass_flux_x, const int* fields) { if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x(0,0,0) = vol_flux_x(0,4,0); if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x(0,0,0) = mass_flux_x(0,4,0); } __global__ void ops_update_halo_kernel3_plus_4_a( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_plus_4_a[0][0] + idx_z * 1*1 * dims_update_halo_kernel3_plus_4_a[0][0] * dims_update_halo_kernel3_plus_4_a[0][1]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel3_plus_4_a[1][0] + idx_z * 1*1 * dims_update_halo_kernel3_plus_4_a[1][0] * dims_update_halo_kernel3_plus_4_a[1][1]; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { ACC<double> argp0(dims_update_halo_kernel3_plus_4_a[0][0], dims_update_halo_kernel3_plus_4_a[0][1], arg0); ACC<double> argp1(dims_update_halo_kernel3_plus_4_a[1][0], dims_update_halo_kernel3_plus_4_a[1][1], arg1); update_halo_kernel3_plus_4_a_gpu(argp0, argp1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel3_plus_4_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel3_plus_4_a_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,59)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(59,"update_halo_kernel3_plus_4_a"); OPS_kernels[59].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[3]; #endif #ifdef OPS_MPI if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != dims_update_halo_kernel3_plus_4_a_h[0][0] || ydim0 != dims_update_halo_kernel3_plus_4_a_h[0][1] || xdim1 != dims_update_halo_kernel3_plus_4_a_h[1][0] || ydim1 != dims_update_halo_kernel3_plus_4_a_h[1][1]) { dims_update_halo_kernel3_plus_4_a_h[0][0] = xdim0; dims_update_halo_kernel3_plus_4_a_h[0][1] = ydim0; dims_update_halo_kernel3_plus_4_a_h[1][0] = xdim1; dims_update_halo_kernel3_plus_4_a_h[1][1] = ydim1; cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel3_plus_4_a, dims_update_halo_kernel3_plus_4_a_h, sizeof(dims_update_halo_kernel3_plus_4_a))); } int *arg2h = (int *)arg2.data; int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 
(z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[59].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel3_plus_4_a<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[59].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[59].mpi_time += t2-t1; OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel3_plus_4_a(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 59; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 59; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel3_plus_4_a_execute; if (OPS_diags > 1) { ops_timing_realloc(59,"update_halo_kernel3_plus_4_a"); } ops_enqueue_kernel(desc); } #endif
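The generated host stub above sizes its 3D launch grid by ceil-dividing the iteration range by the OPS block sizes and leaves the ragged edge to the in-kernel idx < size guard. The stripped-down sketch below reproduces just that launch pattern, with made-up sizes and a flat buffer standing in for the OPS dats; it illustrates the grid arithmetic and is not OPS code.

#include <cuda_runtime.h>
#include <cstdio>

// Bounds-guarded 3D kernel: one thread per (x, y, z) point of the iteration range.
__global__ void touch(float *data, int size0, int size1, int size2) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    if (x < size0 && y < size1 && z < size2)
        data[(size_t(z) * size1 + y) * size0 + x] += 1.0f;
}

int main() {
    int size0 = 100, size1 = 60, size2 = 3;   // iteration range, e.g. end[i] - start[i]
    dim3 tblock(32, 4, 2);                    // stands in for OPS_block_size_{x,y,z}
    dim3 grid((size0 - 1) / tblock.x + 1,     // same ceil-division as the host stub
              (size1 - 1) / tblock.y + 1,
              (size2 - 1) / tblock.z + 1);
    float *data;
    cudaMalloc(&data, size_t(size0) * size1 * size2 * sizeof(float));
    cudaMemset(data, 0, size_t(size0) * size1 * size2 * sizeof(float));
    touch<<<grid, tblock>>>(data, size0, size1, size2);
    cudaDeviceSynchronize();
    printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(data);
    return 0;
}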
c5b2f1e728d233bbcc1a6db5ebe4dbb6e2d14aa6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void initWith(float num, float *a, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < N; i += stride) { a[i] = num; } } __global__ void addVectorsInto(float *result, float *a, float *b, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < N; i += stride) { result[i] = a[i] + b[i]; } } void checkElementsAre(float target, float *vector, int N) { for (int i = 0; i < N; i++) { if (vector[i] != target) { printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target); exit(1); } } printf("Success! All values calculated correctly.\n"); } int main() { int deviceId; int numberOfSMs; hipGetDevice(&deviceId); hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId); const int N = 2 << 24; size_t size = N * sizeof(float); float *a; float *b; float *c; float *h_c; hipMalloc(&a, size); hipMalloc(&b, size); hipMalloc(&c, size); hipHostMalloc(&h_c, size); size_t threadsPerBlock; size_t numberOfBlocks; threadsPerBlock = 256; numberOfBlocks = 32 * numberOfSMs; hipError_t addVectorsErr; hipError_t asyncErr; /* * Create 3 streams to run initialize the 3 data vectors in parallel. */ hipStream_t stream1, stream2, stream3; hipStreamCreate(&stream1); hipStreamCreate(&stream2); hipStreamCreate(&stream3); /* * Give each `initWith` launch its own non-standard stream. */ initWith << <numberOfBlocks, threadsPerBlock, 0, stream1 >> > (3, a, N); initWith << <numberOfBlocks, threadsPerBlock, 0, stream2 >> > (4, b, N); initWith << <numberOfBlocks, threadsPerBlock, 0, stream3 >> > (0, c, N); hipDeviceSynchronize(); for (int i = 0; i < 4; ++i) { hipStream_t stream; hipStreamCreate(&stream); addVectorsInto << <numberOfBlocks / 4, threadsPerBlock, 0, stream >> > (&c[i*N / 4], &a[i*N / 4], &b[i*N / 4], N / 4); hipMemcpyAsync(&h_c[i*N / 4], &c[i*N / 4], size / 4, hipMemcpyDeviceToHost, stream); hipStreamDestroy(stream); } addVectorsErr = hipGetLastError(); if (addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr)); asyncErr = hipMemcpy(h_c, c, size, hipMemcpyDeviceToHost); if (asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr)); checkElementsAre(7, h_c, N); /* * Destroy streams when they are no longer needed. */ hipStreamDestroy(stream1); hipStreamDestroy(stream2); hipStreamDestroy(stream3); hipFree(a); hipFree(b); hipFree(c); hipHostFree(h_c); }
c5b2f1e728d233bbcc1a6db5ebe4dbb6e2d14aa6.cu
#include <stdio.h> __global__ void initWith(float num, float *a, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < N; i += stride) { a[i] = num; } } __global__ void addVectorsInto(float *result, float *a, float *b, int N) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < N; i += stride) { result[i] = a[i] + b[i]; } } void checkElementsAre(float target, float *vector, int N) { for (int i = 0; i < N; i++) { if (vector[i] != target) { printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target); exit(1); } } printf("Success! All values calculated correctly.\n"); } int main() { int deviceId; int numberOfSMs; cudaGetDevice(&deviceId); cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId); const int N = 2 << 24; size_t size = N * sizeof(float); float *a; float *b; float *c; float *h_c; cudaMalloc(&a, size); cudaMalloc(&b, size); cudaMalloc(&c, size); cudaMallocHost(&h_c, size); size_t threadsPerBlock; size_t numberOfBlocks; threadsPerBlock = 256; numberOfBlocks = 32 * numberOfSMs; cudaError_t addVectorsErr; cudaError_t asyncErr; /* * Create 3 streams to run initialize the 3 data vectors in parallel. */ cudaStream_t stream1, stream2, stream3; cudaStreamCreate(&stream1); cudaStreamCreate(&stream2); cudaStreamCreate(&stream3); /* * Give each `initWith` launch its own non-standard stream. */ initWith << <numberOfBlocks, threadsPerBlock, 0, stream1 >> > (3, a, N); initWith << <numberOfBlocks, threadsPerBlock, 0, stream2 >> > (4, b, N); initWith << <numberOfBlocks, threadsPerBlock, 0, stream3 >> > (0, c, N); cudaDeviceSynchronize(); for (int i = 0; i < 4; ++i) { cudaStream_t stream; cudaStreamCreate(&stream); addVectorsInto << <numberOfBlocks / 4, threadsPerBlock, 0, stream >> > (&c[i*N / 4], &a[i*N / 4], &b[i*N / 4], N / 4); cudaMemcpyAsync(&h_c[i*N / 4], &c[i*N / 4], size / 4, cudaMemcpyDeviceToHost, stream); cudaStreamDestroy(stream); } addVectorsErr = cudaGetLastError(); if (addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr)); asyncErr = cudaMemcpy(h_c, c, size, cudaMemcpyDeviceToHost); if (asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr)); checkElementsAre(7, h_c, N); /* * Destroy streams when they are no longer needed. */ cudaStreamDestroy(stream1); cudaStreamDestroy(stream2); cudaStreamDestroy(stream3); cudaFree(a); cudaFree(b); cudaFree(c); cudaFreeHost(h_c); }
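This pair overlaps kernel work with device-to-host copies by cutting the vector into four chunks, giving each chunk its own stream, and using pinned host memory so cudaMemcpyAsync can actually run asynchronously. The condensed sketch below keeps that chunking pattern but adds an explicit cudaStreamSynchronize per chunk before the host reads the results (the original instead relies on a later blocking cudaMemcpy of the whole buffer); the inputs are zero-filled only so the printed value is well defined, and the sizes are mine.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void add(const float *a, const float *b, float *c, int n) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        c[i] = a[i] + b[i];
}

int main() {
    const int N = 1 << 22, CHUNKS = 4, chunk = N / CHUNKS;
    const size_t bytes = N * sizeof(float), cbytes = chunk * sizeof(float);
    float *a, *b, *c, *h_c;
    cudaMalloc(&a, bytes); cudaMalloc(&b, bytes); cudaMalloc(&c, bytes);
    cudaMemset(a, 0, bytes); cudaMemset(b, 0, bytes);
    cudaMallocHost(&h_c, bytes);                        // pinned host memory for async copies
    cudaStream_t streams[CHUNKS];
    for (int i = 0; i < CHUNKS; ++i) {
        cudaStreamCreate(&streams[i]);
        add<<<256, 256, 0, streams[i]>>>(a + i * chunk, b + i * chunk, c + i * chunk, chunk);
        cudaMemcpyAsync(h_c + i * chunk, c + i * chunk, cbytes,
                        cudaMemcpyDeviceToHost, streams[i]);
    }
    for (int i = 0; i < CHUNKS; ++i) {                  // wait before the host touches h_c
        cudaStreamSynchronize(streams[i]);
        cudaStreamDestroy(streams[i]);
    }
    printf("h_c[0] = %f, status = %s\n", h_c[0], cudaGetErrorString(cudaGetLastError()));
    cudaFree(a); cudaFree(b); cudaFree(c); cudaFreeHost(h_c);
    return 0;
}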
03887e519b586c5eb7692fb446b14583e496fba7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
ECGR 6090 Heterogeneous Computing
Homework0 Problem 2- 1D stencil using GPU and shared memory
Written by Bhavin Thakar - 801151488
*/
// To execute the program type: ./1dstencilsharedmemory
#include <stdio.h>
#include <sys/time.h>

struct timeval stop, start, start1, stop1;

#define R 4       // Defining radius as 4
#define B 128     // Defining Thread Block Size as 128
#define N 1000000 // Defining Number of Elements as 1M

// Kernel Function
__global__ void stencil1d(int *in, int *out) {
    __shared__ int temp[B + 2 * R]; // Declaring a shared integer array
    int gindex = threadIdx.x + blockIdx.x * blockDim.x;
    int lindex = threadIdx.x + R;
    temp[lindex] = in[gindex]; // storing in shared memory
    if (threadIdx.x < R) {
        temp[lindex - R] = in[gindex - R];
        temp[lindex + B] = in[gindex + B];
    }
    __syncthreads();
    int result = 0;
    for (int offset = -R; offset <= R; offset++) {
        result += temp[lindex + offset];
    }
    // Store the result
    out[gindex] = result;
}

// random function to generate random numbers
void random(int *a, int n) {
    int i;
    for (i = 0; i <= n + 1; ++i)
        a[i] = rand() % 100;
}

int main(void) {
    int n;
    int *c_in, *c_out; // integer aray for CPU
    int size = N * sizeof(int);
    n = N + 2 * R;
    // Allocating memory for CPU integer array
    c_in = (int *)malloc(n * size);
    c_out = (int *)malloc(N * size);
    random(c_in, n); // Calling random function
    int *d_in, *d_out; // integer array for GPU
    // Allocating memory for GPU integer array
    hipMalloc(&d_in, n * size);
    hipMalloc(&d_out, N * size);
    // Copying input from CPU to GPU
    hipMemcpy(d_in, c_in, n * size, hipMemcpyHostToDevice);
    gettimeofday(&start, NULL);
    hipLaunchKernelGGL(( stencil1d), dim3((N/B-1)/B), dim3(B), 0, 0, d_in, d_out); // Calling Kernel Function
    gettimeofday(&stop, NULL);
    hipDeviceSynchronize(); // Check if streams are completed
    printf("Execution time of kernel: %lu us\n",
           (stop.tv_sec - start.tv_sec) * 1000000 + stop.tv_usec - start.tv_usec);
    // Copying back the results from GPU to CPU
    hipMemcpy(c_out, d_out, n * size, hipMemcpyDeviceToHost);
    // Free resources
    free(c_in);
    free(c_out);
    hipFree(d_in);
    hipFree(d_out);
    return 0;
}
03887e519b586c5eb7692fb446b14583e496fba7.cu
/*
ECGR 6090 Heterogeneous Computing
Homework0 Problem 2- 1D stencil using GPU and shared memory
Written by Bhavin Thakar - 801151488
*/
// To execute the program type: ./1dstencilsharedmemory
#include <stdio.h>
#include <sys/time.h>

struct timeval stop, start, start1, stop1;

#define R 4       // Defining radius as 4
#define B 128     // Defining Thread Block Size as 128
#define N 1000000 // Defining Number of Elements as 1M

// Kernel Function
__global__ void stencil1d(int *in, int *out) {
    __shared__ int temp[B + 2 * R]; // Declaring a shared integer array
    int gindex = threadIdx.x + blockIdx.x * blockDim.x;
    int lindex = threadIdx.x + R;
    temp[lindex] = in[gindex]; // storing in shared memory
    if (threadIdx.x < R) {
        temp[lindex - R] = in[gindex - R];
        temp[lindex + B] = in[gindex + B];
    }
    __syncthreads();
    int result = 0;
    for (int offset = -R; offset <= R; offset++) {
        result += temp[lindex + offset];
    }
    // Store the result
    out[gindex] = result;
}

// random function to generate random numbers
void random(int *a, int n) {
    int i;
    for (i = 0; i <= n + 1; ++i)
        a[i] = rand() % 100;
}

int main(void) {
    int n;
    int *c_in, *c_out; // integer aray for CPU
    int size = N * sizeof(int);
    n = N + 2 * R;
    // Allocating memory for CPU integer array
    c_in = (int *)malloc(n * size);
    c_out = (int *)malloc(N * size);
    random(c_in, n); // Calling random function
    int *d_in, *d_out; // integer array for GPU
    // Allocating memory for GPU integer array
    cudaMalloc(&d_in, n * size);
    cudaMalloc(&d_out, N * size);
    // Copying input from CPU to GPU
    cudaMemcpy(d_in, c_in, n * size, cudaMemcpyHostToDevice);
    gettimeofday(&start, NULL);
    stencil1d<<<(N/B-1)/B, B>>>(d_in, d_out); // Calling Kernel Function
    gettimeofday(&stop, NULL);
    cudaDeviceSynchronize(); // Check if streams are completed
    printf("Execution time of kernel: %lu us\n",
           (stop.tv_sec - start.tv_sec) * 1000000 + stop.tv_usec - start.tv_usec);
    // Copying back the results from GPU to CPU
    cudaMemcpy(c_out, d_out, n * size, cudaMemcpyDeviceToHost);
    // Free resources
    free(c_in);
    free(c_out);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
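A few things look off in this stencil pair: (N/B-1)/B blocks launch far fewer threads than there are elements, the allocation and copy sizes multiply an element count by a byte count (n*size, with size already N*sizeof(int)), block 0 reads in[gindex - R] before the start of the buffer, and the stop timestamp is taken before the device synchronize, so it measures launch overhead rather than kernel time. The standalone CUDA sketch below is one way to drive the same shared-memory kernel safely: byte-sized allocations, an R-element halo around the input with the kernel given an offset pointer, event-based timing, and N chosen as a multiple of B so the unguarded kernel covers the range exactly. It is my reading of the intended setup, not a drop-in replacement for the original.

#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

#define R 4
#define B 128
#define N (1 << 20)   // a multiple of B, so the unguarded kernel covers the range exactly

// Same shared-memory stencil as in the pair above, unchanged.
__global__ void stencil1d(int *in, int *out) {
    __shared__ int temp[B + 2 * R];
    int gindex = threadIdx.x + blockIdx.x * blockDim.x;
    int lindex = threadIdx.x + R;
    temp[lindex] = in[gindex];
    if (threadIdx.x < R) {
        temp[lindex - R] = in[gindex - R];
        temp[lindex + B] = in[gindex + B];
    }
    __syncthreads();
    int result = 0;
    for (int offset = -R; offset <= R; offset++) result += temp[lindex + offset];
    out[gindex] = result;
}

int main(void) {
    int n = N + 2 * R;                                // input carries an R-element halo on each side
    int *c_in  = (int *)malloc(n * sizeof(int));      // sizes in bytes, not count * byte-count
    int *c_out = (int *)malloc(N * sizeof(int));
    for (int i = 0; i < n; ++i) c_in[i] = rand() % 100;

    int *d_in, *d_out;
    cudaMalloc(&d_in,  n * sizeof(int));
    cudaMalloc(&d_out, N * sizeof(int));
    cudaMemcpy(d_in, c_in, n * sizeof(int), cudaMemcpyHostToDevice);

    cudaEvent_t t0, t1;                               // events time the kernel itself, not the launch
    cudaEventCreate(&t0);
    cudaEventCreate(&t1);
    cudaEventRecord(t0);
    stencil1d<<<N / B, B>>>(d_in + R, d_out);         // offset by R so in[gindex - R] stays in bounds
    cudaEventRecord(t1);
    cudaEventSynchronize(t1);
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, t0, t1);
    printf("kernel time: %f ms\n", ms);

    cudaMemcpy(c_out, d_out, N * sizeof(int), cudaMemcpyDeviceToHost);
    printf("out[0] = %d, status = %s\n", c_out[0], cudaGetErrorString(cudaGetLastError()));
    free(c_in); free(c_out);
    cudaFree(d_in); cudaFree(d_out);
    cudaEventDestroy(t0); cudaEventDestroy(t1);
    return 0;
}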
19af756bd4b390004bd2ff5de90f90f3906e80e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <cutil_inline.h> #include <cudpp.h> #define MAXP 10000 #define MAXN 21 #define MAXG 128000 #define THREADS 256 #define ParticlesInSet 12 #define SetsInBlock 20 struct grid { float oX, oY; float size; int nX, nY; int SN; int* set; int* nump; int* cell; int* perm; }; struct simulation { float minX, maxX; float minY, maxY; float dt; int tsn; int ssi; int nsi; }; // Host Variables int *hMaterial; float *hPosX; float *hPosY; float *hVelX; float *hVelY; float *hDensity; float *hEnergy; float *hPressure; float *hVelDotX; float *hVelDotY; float *hDensityDot; float *hEnergyDot; int *hList; int *hHash; int *hIndex; int *hSetStart; int *hSetStop; int hPN; float hSmooth, hMass, hSound; int hMatType[10]; float hMatProp[10][10]; struct simulation hRun; struct grid hGrid; CUDPPHandle hSortHandle; // Device Variables __device__ int *dMaterial; __device__ float *dPosX; __device__ float *dPosY; __device__ float *dVelX; __device__ float *dVelY; __device__ float *dDensity; __device__ float *dEnergy; __device__ float *dPressure; __device__ float *dVelDotX; __device__ float *dVelDotY; __device__ float *dDensityDot; __device__ float *dEnergyDot; __device__ int *dList; __device__ int *dHash; __device__ int *dIndex; __device__ int *dSetStart; __device__ int *dSetStop; __device__ int *dIntDummy; __device__ float *dFloatDummy; __device__ __constant__ int dPN; __device__ __constant__ float dSmooth, dMass, dSound; __device__ __constant__ int dMatType[10]; __device__ __constant__ float dMatProp[10][10]; __device__ __constant__ struct simulation dRun; __device__ struct grid dGrid; __device__ float *dPosX0; __device__ float *dPosY0; __device__ float *dVelX0; __device__ float *dVelY0; __device__ float *dDensity0; __device__ float *dEnergy0; // Device code __device__ float kernelWendland(float r, float h) { float q, alpha, w; /** * \brief Wendland kernel * * \date Feb 8, 2011 * \author Luca Massidda */ q = r / h; // for 3D //alpha = 21.0 / (16.0 * M_PI * h * h * h); // for 2D alpha = 7.0 / (4.0 * M_PI * h * h); w = 0.0; if (q < 2) { w = 1.0 - 0.5*q; w *= w; w *= w; w *= 1.0 + 2.0*q; w *= alpha; } return w; } __device__ float kernelDerivWendland(float r, float h) { float q, alpha, dwdr; /** * \brief Wendland kernel derivative * * \date Feb 8, 2011 * \author Luca Massidda */ q = r / h; // for 3D //alpha = 21.0 / (16.0 * M_PI * h * h * h); // for 2D alpha = 7.0 / (4.0 * M_PI * h * h); dwdr = 0; if (q < 2) { dwdr = 5.0 / 8.0 * q * pow((q - 2.0), 3) ; dwdr *= alpha / h; } return dwdr; } __device__ float kernelGauss(float r, float h) { float r2, q2, h2, alpha, w;//, dwdr; /** * \brief Gauss kernel * * \date Dec 21, 2010 * \author Luca Massidda */ r2 = r * r ; h2 = h * h; q2 = r2 / h2; //alpha = 1.0 / (pow(h, 1) * pow(3.14, 0.5)); alpha = 1.0 / (3.14 * h2); w = 0.0; //dwdr = 0.0; if (q2 < 4.0) { w = alpha * expf(-q2); //dwdr = w * (-2.0 * r / h2); } return w; } __device__ float kernelDerivGauss(float r, float h) { float r2, q2, h2, alpha, w, dwdr; /** * \brief Gauss kernel * * \date Dec 21, 2010 * \author Luca Massidda */ r2 = r * r ; h2 = h * h; q2 = r2 / h2; //alpha = 1.0 / (pow(h, 1) * pow(3.14, 0.5)); alpha = 1.0 / (3.14 * h2); w = 0.0; dwdr = 0.0; if (q2 < 4.0) { w = alpha * expf(-q2); dwdr = w * (-2.0 * r / h2); } return dwdr; } __device__ float pressureGas(int mat ,float rho, float u) { /** * \brief Ideal gas Equation Of State * * p = (k -1) rho u * c 
= (k(k -1) u)^0.5 * * k = dMatProp[mat][1] * pshift = dMatProp[mat][2] * * \date Jun 10, 2010 * \author Luca Massidda */ float p; // float c; p = (dMatProp[mat][1] - 1.0) * rho * u; p += dMatProp[mat][2]; // c = sqrtf(dMatProp[mat][1] * (dMatProp[mat][1] - 1.0) * u); return p; } __device__ float pressurePoly(int mat , float rho, float u) { /** * \brief Mie-Gruneisen polynomial Equation Of State * * p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression * p = t1 mu + t2 mu^2 + b0 rho0 u in tension * * rho0 = dMatProp[mat][0]; * a1 = dMatProp[mat][1]; * a2 = dMatProp[mat][2]; * a3 = dMatProp[mat][3]; * b0 = dMatProp[mat][4]; * b1 = dMatProp[mat][5]; * t1 = dMatProp[mat][6]; * t2 = dMatProp[mat][7]; * pmin = dMatProp[mat][8]; * * \date Jun 10, 2010 * \author Luca Massidda */ float mu; float p; // float c; mu = (rho - dMatProp[mat][0]) / dMatProp[mat][0]; if (mu < 0) p = (dMatProp[mat][6] * mu + dMatProp[mat][7] * mu*mu) + (dMatProp[mat][4] * dMatProp[mat][0] * u); else p = (dMatProp[mat][1] * mu + dMatProp[mat][2] * mu*mu + dMatProp[mat][3] * mu*mu*mu) + ((dMatProp[mat][4] + dMatProp[mat][5] * mu) * dMatProp[mat][0] * u); if (p < dMatProp[mat][8]) p = dMatProp[mat][8]; // c = sqrtf(dMatProp[mat][1] / rho); return p; } __device__ float pressureShock(int mat, float rho, float u) { /** * \brief Mie-Gruneisen Shock Hugoniot Equation Of State * * mu = rho / rho0 -1 * g = g * rho0 / rho * ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2 * uh = 1/2 ph/rho0 * (mu / (1 + mu)) * p = ph + g * rho * (u - uh) * * rho0 = dMatProp[mat][0]; * c0 = dMatProp[mat][1]; * g0 = dMatProp[mat][2]; * s0 = dMatProp[mat][3]; * pmin = dMatProp[mat][4]; * * \date Jun 10, 2010 * \author Luca Massidda */ float mu; float p, ph; // float c; mu = (rho - dMatProp[mat][0]) / dMatProp[mat][0]; ph = (dMatProp[mat][0] * powf(dMatProp[mat][1], 2) * mu*(1.0 +mu)) / powf((1.0 - (dMatProp[mat][3] -1.0) * mu), 2); p = ph + dMatProp[mat][2] * dMatProp[mat][0] * (u - (0.5 * ph / dMatProp[mat][0] * (mu / (1.0 + mu)))); if (p < dMatProp[mat][4]) p = dMatProp[mat][4]; // c = dMatProp[mat][1]; return p; } __device__ float pressureTait(int mat, float rho, float u) { /** * \brief Tait Equation Of State * * p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0); * c = c0; * * rho0 = dMatProp[mat][0]; * c0 = dMatProp[mat][1]; * pmin = dMatProp[mat][2]; * * \date Jun 10, 2010 * \author Luca Massidda */ float p; // float c; p = dMatProp[mat][0] * powf(dMatProp[mat][1], 2) / 7.0 * (powf((rho / dMatProp[mat][0]), 7) - 1.0); if (p < dMatProp[mat][2]) p = dMatProp[mat][2]; // c = dMatProp[mat][1]; return p; } // Global code __global__ void kerInteraction2(const int* dList, const float* dPosX, const float* dPosY, const float* dVelX, const float* dVelY, const float* dDensity, const float* dPressure, float* dDensityDot, float* dVelDotX, float* dVelDotY) { /** * \brief Interate particles * * \date Jan 6, 2011 * \author Luca Massidda */ __shared__ float sPosX[THREADS]; __shared__ float sPosY[THREADS]; __shared__ float sVelX[THREADS]; __shared__ float sVelY[THREADS]; __shared__ float sDensity[THREADS]; __shared__ float sPressure[THREADS]; int ip, il, jp, jt; float iDensityDot; float iVelDotX, iVelDotY, iVelDotZ; float jPosX, jPosY; float jVelX, jVelY; float jDensity, jPressure; volatile float dx, dy, dz, dr, dvr, dwdr, f; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { iDensityDot = 0.0; iVelDotX = 0.0; iVelDotY = 0.0; iVelDotZ = 0.0; sPosX[threadIdx.x] = dPosX[ip]; sPosY[threadIdx.x] = dPosY[ip]; sVelX[threadIdx.x] = 
dVelX[ip]; sVelY[threadIdx.x] = dVelY[ip]; sDensity[threadIdx.x] = dDensity[ip]; sPressure[threadIdx.x] = dPressure[ip]; __syncthreads(); for (il = 1; il < MAXN; il++) { jp = dList[ip + il * MAXP]; jt = jp - blockDim.x * blockIdx.x; if ((jt >= 0) && (jt < THREADS)) { jPosX = sPosX[jt]; jPosY = sPosY[jt]; jVelX = sVelX[jt]; jVelY = sVelY[jt]; jDensity = sDensity[jt]; jPressure = sPressure[jt]; } else { jPosX = dPosX[jp]; jPosY = dPosY[jp]; jVelX = dVelX[jp]; jVelY = dVelY[jp]; jDensity = dDensity[jp]; jPressure = dPressure[jp]; } dx = sPosX[threadIdx.x] - jPosX; dy = sPosY[threadIdx.x] - jPosY; dz = 0.0; dr = sqrtf(dx * dx + dy * dy + dz * dz); if (dr < 0.1 * dSmooth) dr = 100.0 * dSmooth; //dwdr = kernelDerivGauss(dr, dSmooth); dwdr = kernelDerivWendland(dr, dSmooth); dvr = 0.0; dvr += (sPosX[threadIdx.x] - jPosX) * (sVelX[threadIdx.x] - jVelX); dvr += (sPosY[threadIdx.x] - jPosY) * (sVelY[threadIdx.x] - jVelY); iDensityDot += dMass * dvr * dwdr / dr; // Calculate interparticle pressure action f = -(sPressure[threadIdx.x] + jPressure) / (sDensity[threadIdx.x] * jDensity); iVelDotX += dMass * f * dwdr * (sPosX[threadIdx.x] - jPosX) / dr; iVelDotY += dMass * f * dwdr * (sPosY[threadIdx.x] - jPosY) / dr; // Calculate shock correction for mass f = sDensity[threadIdx.x] - jDensity; f *= 2.0 * dSound / (sDensity[threadIdx.x] + jDensity); iDensityDot += dMass * f * dwdr; // Calculate shock correction for momentum if (dvr < 0) f = dvr; else f = 0.0; f *= dSmooth / (dr * dr + 0.01 * dSmooth * dSmooth); f *= 2. * dSound / (sDensity[threadIdx.x] + jDensity); f *= 0.03; iVelDotX += dMass * f * dwdr * (sPosX[threadIdx.x] - jPosX) / dr; iVelDotY += dMass * f * dwdr * (sPosY[threadIdx.x] - jPosY) / dr; } dDensityDot[ip] += iDensityDot; dVelDotX[ip] += iVelDotX; dVelDotY[ip] += iVelDotY; } } __global__ void kerInteraction(const int* dList, const float* dPosX, const float* dPosY, const float* dVelX, const float* dVelY, const float* dDensity, const float* dPressure, float* dDensityDot, float* dVelDotX, float* dVelDotY) { /** * \brief Interate particles * * \date Jan 6, 2011 * \author Luca Massidda */ int ip, il, jp; float iDensityDot; float iVelDotX, iVelDotY, iVelDotZ; volatile float dx, dy, dz, dr, dvr, dwdr, f; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { iDensityDot = 0.0; iVelDotX = 0.0; iVelDotY = 0.0; iVelDotZ = 0.0; for (il = 1; il < MAXN; il++) { jp = dList[ip * MAXN + il]; dx = dPosX[ip] - dPosX[jp]; dy = dPosY[ip] - dPosY[jp]; dz = 0.0; dr = sqrtf(dx * dx + dy * dy + dz * dz); if (dr < 0.1 * dSmooth) dr = 100.0 * dSmooth; //dwdr = kernelDerivGauss(dr, dSmooth); dwdr = kernelDerivWendland(dr, dSmooth); dvr = 0.0; dvr += (dPosX[ip] - dPosX[jp]) * (dVelX[ip] - dVelX[jp]); dvr += (dPosY[ip] - dPosY[jp]) * (dVelY[ip] - dVelY[jp]); iDensityDot += dMass * dvr * dwdr / dr; // Calculate interparticle pressure action f = -(dPressure[ip] + dPressure[jp]) / (dDensity[ip] * dDensity[jp]); iVelDotX += dMass * f * dwdr * (dPosX[ip] - dPosX[jp]) / dr; iVelDotY += dMass * f * dwdr * (dPosY[ip] - dPosY[jp]) / dr; // Calculate shock correction for mass f = dDensity[ip] - dDensity[jp]; f *= 2.0 * dSound / (dDensity[ip] + dDensity[jp]); iDensityDot += dMass * f * dwdr; // Calculate shock correction for momentum if (dvr < 0) f = dvr; else f = 0.0; f *= dSmooth / (dr * dr + 0.01 * dSmooth * dSmooth); f *= 2. 
* dSound / (dDensity[ip] + dDensity[jp]); f *= 0.03; iVelDotX += dMass * f * dwdr * (dPosX[ip] - dPosX[jp]) / dr; iVelDotY += dMass * f * dwdr * (dPosY[ip] - dPosY[jp]) / dr; } dDensityDot[ip] += iDensityDot; dVelDotX[ip] += iVelDotX; dVelDotY[ip] += iVelDotY; } } __global__ void kerInteraction0(const int* dList, const float* dPosX, const float* dPosY, const float* dVelX, const float* dVelY, const float* dDensity, const float* dPressure, float* dDensityDot, float* dVelDotX, float* dVelDotY) { /** * \brief Interate particles * * \date Jan 6, 2011 * \author Luca Massidda */ int ip, il, jp; float iDensityDot; float iVelDotX, iVelDotY, iVelDotZ; volatile float dx, dy, dz, dr, dvr, dwdr, f; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { iDensityDot = 0.0; iVelDotX = 0.0; iVelDotY = 0.0; iVelDotZ = 0.0; for (il = 1; il < dList[ip]; il++) { jp = dList[ip * MAXN + il]; // for (jp = 0; jp < dPN; jp++) { dx = dPosX[ip] - dPosX[jp]; dy = dPosY[ip] - dPosY[jp]; dz = 0.0; dr = sqrtf(dx * dx + dy * dy + dz * dz); //dwdr = kernelDerivGauss(dr, dSmooth); dwdr = kernelDerivWendland(dr, dSmooth); dvr = 0.0; dvr += (dPosX[ip] - dPosX[jp]) * (dVelX[ip] - dVelX[jp]); dvr += (dPosY[ip] - dPosY[jp]) * (dVelY[ip] - dVelY[jp]); if (ip != jp) dvr /= dr; else dvr = 0.0; iDensityDot += dMass * dvr * dwdr; //iDensityDot += dMass * dvr * dwdr * iDensity / sDensity[jt][threadIdx.y]; // Calculate interparticle pressure action f = -(dPressure[ip] + dPressure[jp]) / (dDensity[ip] * dDensity[jp]); if (ip != jp) iVelDotX += dMass * f * dwdr * (dPosX[ip] - dPosX[jp]) / dr; if (ip != jp) iVelDotY += dMass * f * dwdr * (dPosY[ip] - dPosY[jp]) / dr; /* // Calculate shock correction for mass f = dDensity[ip] - dDensity[jp]; f *= 2.0 * dSound / (dDensity[ip] + dDensity[jp]); iDensityDot += dMass * f * dwdr; */ // Calculate shock correction for momentum if (dvr < 0) f = dvr * dr; else f = 0.0; f *= dSmooth / (dr * dr + 0.01 * dSmooth * dSmooth); f *= 2. 
* dSound / (dDensity[ip] + dDensity[jp]); f *= 0.03; if (ip != jp) iVelDotX += dMass * f * dwdr * (dPosX[ip] - dPosX[jp]) / dr; if (ip != jp) iVelDotY += dMass * f * dwdr * (dPosY[ip] - dPosY[jp]) / dr; } dDensityDot[ip] += iDensityDot; dVelDotX[ip] += iVelDotX; dVelDotY[ip] += iVelDotY; } } __global__ void balanceEnergy(const float* dPressure, const float* dDensity, const float* dDensityDot, float* dEnergyDot) { /** * \brief Interate particles * * \date Jan 9, 2011 * \author Luca Massidda */ volatile int ip; float iPressure, iDensity, iDensityDot; float iEnergyDot; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { iPressure = dPressure[ip]; iDensity = dDensity[ip]; iDensityDot = dDensityDot[ip]; iEnergyDot = (iPressure * iDensityDot) / (iDensity * iDensity); dEnergyDot[ip] += iEnergyDot; } } __global__ void shockMomentum(const float* dDensity, const float* dDensityDot, float* dPressure) { /** * \brief Interate particles * * \date Jan 9, 2011 * \author Luca Massidda */ volatile int ip; volatile float iDensity, iDensityDot, iPressure; volatile float iVelDiv; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { iDensity = dDensity[ip]; iDensityDot = dDensityDot[ip]; iPressure = 0; iVelDiv = -iDensityDot / iDensity; if (iVelDiv < 0.0) { iPressure -= 1.0 * dSmooth * iDensity * dSound * iVelDiv; iPressure += 0.5 * dSmooth*dSmooth * iDensity * iVelDiv*iVelDiv; } dPressure[ip] += iPressure; } } __global__ void kerUpdate(const int* dMaterial, const float* dVelDotX, const float* dVelDotY, const float* dDensityDot, const float* dEnergyDot, const float alpha, const float* dPosX0, const float* dPosY0, const float* dVelX0, const float* dVelY0, const float* dDensity0, const float* dEnergy0, float* dPosX, float* dPosY, float* dVelX, float* dVelY, float* dDensity, float* dEnergy, float* dPressure) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip; float f; int iMaterial; float iDensity, iEnergy; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { f = dPosX0[ip] + alpha * (dPosX[ip] + dRun.dt * dVelX[ip] - dPosX0[ip]); /* if (f < dRun.minX) f += dRun.maxX - dRun.minX; if (f > dRun.maxX) f -= dRun.maxX - dRun.minX; */ dPosX[ip] = f; f = dPosY0[ip] + alpha * (dPosY[ip] + dRun.dt * dVelY[ip] - dPosY0[ip]); /* if (f < dRun.minY) f += dRun.maxY - dRun.minY; if (f > dRun.maxY) f -= dRun.maxY - dRun.minY; */ dPosY[ip] = f; f = dVelX0[ip] + alpha * (dVelX[ip] + dRun.dt * dVelDotX[ip] - dVelX0[ip]); dVelX[ip] = f; f = dVelY0[ip] + alpha * (dVelY[ip] + dRun.dt * dVelDotY[ip] - dVelY0[ip]); dVelY[ip] = f; f = dDensity0[ip] + alpha * (dDensity[ip] + dRun.dt * dDensityDot[ip] - dDensity0[ip]); dDensity[ip] = f; f = dEnergy0[ip] + alpha * (dEnergy[ip] + dRun.dt * dEnergyDot[ip] - dEnergy0[ip]); dEnergy[ip] = f; iMaterial = dMaterial[ip]; if (iMaterial < 0) { dVelX[ip] = dVelX0[ip]; dVelY[ip] = dVelY0[ip]; } iMaterial = abs(iMaterial); iDensity = dDensity[ip]; iEnergy = dEnergy[ip]; switch (dMatType[iMaterial]) { case (1) : // IDEAL GAS EOS dPressure[ip] = pressureGas(iMaterial, iDensity, iEnergy); break; case (2) : // MIE-GRUNEISEN POLYNOMIAL EOS dPressure[ip] = pressurePoly(iMaterial, iDensity, iEnergy); break; case (3) : // MIE-GRUNEISEN SHOCK EOS dPressure[ip] = pressureShock(iMaterial, iDensity, iEnergy); break; case (4) : // TAIT EOS dPressure[ip] = pressureTait(iMaterial, iDensity, iEnergy); break; default : dPressure[ip] = 0.0; } } } __global__ void updateForces(const int* dMaterial, float* dVelDotX, float* dVelDotY, float* dDensityDot, float* 
dEnergyDot) { int ip; int iMaterial; float iVelDotX, iVelDotY, iDensityDot, iEnergyDot; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { iVelDotX = 0.0; iVelDotY = 0.0; iDensityDot = 0.0; iEnergyDot = 0.0; iMaterial = dMaterial[ip]; if (iMaterial > 0) iVelDotY = -9.81; dVelDotX[ip] = iVelDotX; dVelDotY[ip] = iVelDotY; dDensityDot[ip] = iDensityDot; dEnergyDot[ip] = iEnergyDot; } } __global__ void updateBoundary(const int* dMaterial, float* dVelDotX, float* dVelDotY) { int ip; int iMaterial; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { iMaterial = dMaterial[ip]; if (iMaterial < 0) { dVelDotX[ip] = 0.0; dVelDotY[ip] = 0.0; } } } __global__ void kerSortInt(int* dArray, const int* dIndex, int* dIntDummy) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip >= dPN) return; dIntDummy[ip] = dArray[ip]; __syncthreads(); dArray[ip] = dIntDummy[dIndex[ip]]; __syncthreads(); } __global__ void kerSortFloat(float* dArray, const int* dIndex, float* dFloatDummy) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip >= dPN) return; dFloatDummy[ip] = dArray[ip]; __syncthreads(); dArray[ip] = dFloatDummy[dIndex[ip]]; __syncthreads(); } __global__ void kerHash(const struct grid dGrid, const float* dPosX, const float* dPosY, int* dHash, int* dIndex) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, ix, iy, ic; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { ix = (int) ((dPosX[ip] - dGrid.oX) / dGrid.size); iy = (int) ((dPosY[ip] - dGrid.oY) / dGrid.size); ic = ix + iy * dGrid.nX; dHash[ip] = ic; dIndex[ip] = ip; } } __global__ void kerGrid(int *dSetStart, int *dSetStop, const int* dHash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ __shared__ int prevHash[THREADS]; __shared__ int nextHash[THREADS]; int ip, ix, iy; int hash; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip >= dPN) return; hash = dHash[ip]; if (threadIdx.x < THREADS -1) prevHash[threadIdx.x +1] = hash; if (threadIdx.x == 0) { if (ip == 0) prevHash[threadIdx.x] = -1; else prevHash[threadIdx.x] = dHash[ip -1]; } if (threadIdx.x > 0) nextHash[threadIdx.x -1] = hash; if (threadIdx.x == THREADS -1) { if (ip == dPN -1) nextHash[threadIdx.x] = -1; else nextHash[threadIdx.x] = dHash[ip +1]; } __syncthreads(); if (hash != prevHash[threadIdx.x]) dSetStart[hash] = ip; if (hash != nextHash[threadIdx.x]) dSetStop[hash] = ip +1; } __global__ void kerList(int *dList, const int* dSetStart, const int* dSetStop, const struct grid dGrid, const float* dPosX, const float* dPosY) { int ip, ic, ix, iy, il, i, j, jp, jc, np; float dx, dy, dr; // Particles list is filled ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip >= dPN) return; ix = (int) ((dPosX[ip] - dGrid.oX) / dGrid.size); iy = (int) ((dPosY[ip] - dGrid.oY) / dGrid.size); ic = ix + iy * dGrid.nX; np = 0; for (j = -1; j <= 1; j++) { for (i = -1; i <= 1; i++) { jc = ic + i + j * dGrid.nX; for (jp = dSetStart[jc]; jp < dSetStop[jc]; jp++) { dx = dPosX[ip] - dPosX[jp]; dy = dPosY[ip] - dPosY[jp]; dr = sqrtf(dx * dx + dy * dy); if ((dr < 2.0 * dSmooth) && (np < MAXN)) { dList[ip * MAXN + np] = jp; np++; } } } } while (np < MAXN) { dList[ip * MAXN + np] = ip; np++; } } // Host code int initHost() { hMaterial = (int *) malloc(MAXP * sizeof(int)); hPosX = (float *) malloc(MAXP * sizeof(float)); hPosY = (float *) malloc(MAXP * 
sizeof(float)); hVelX = (float *) malloc(MAXP * sizeof(float)); hVelY = (float *) malloc(MAXP * sizeof(float)); hDensity = (float *) malloc(MAXP * sizeof(float)); hEnergy = (float *) malloc(MAXP * sizeof(float)); hPressure = (float *) malloc(MAXP * sizeof(float)); hVelDotX = (float *) malloc(MAXP * sizeof(float)); hVelDotY = (float *) malloc(MAXP * sizeof(float)); hDensityDot = (float *) malloc(MAXP * sizeof(float)); hEnergyDot = (float *) malloc(MAXP * sizeof(float)); hList = (int *) malloc(MAXP * MAXN * sizeof(int)); hHash = (int *) malloc(MAXP * sizeof(int)); hIndex = (int *) malloc(MAXP * sizeof(int)); hSetStart = (int *) malloc(MAXG * sizeof(int)); hSetStop = (int *) malloc(MAXG * sizeof(int)); hGrid.set = (int *) malloc(MAXG * sizeof(int)); hGrid.nump = (int *) malloc(MAXG * sizeof(int)); hGrid.cell = (int *) malloc(MAXG * sizeof(int)); hGrid.perm = (int *) malloc(MAXP * sizeof(int)); return 0; } int initDevice() { cutilSafeCall( hipMalloc((void**) &(dMaterial), (MAXP * sizeof(int))) ); cutilSafeCall( hipMalloc((void**) &(dPosX), (MAXP * sizeof(float))) ); cutilSafeCall( hipMalloc((void**) &(dPosY), (MAXP * sizeof(float))) ); cutilSafeCall( hipMalloc((void**) &(dVelX), (MAXP * sizeof(float))) ); cutilSafeCall( hipMalloc((void**) &(dVelY), (MAXP * sizeof(float))) ); cutilSafeCall( hipMalloc((void**) &(dDensity), (MAXP * sizeof(float))) ); cutilSafeCall( hipMalloc((void**) &(dEnergy), (MAXP * sizeof(float))) ); cutilSafeCall( hipMalloc((void**) &(dPressure), (MAXP * sizeof(float))) ); cutilSafeCall( hipMalloc((void**) &(dVelDotX), (MAXP * sizeof(float))) ); cutilSafeCall( hipMalloc((void**) &(dVelDotY), (MAXP * sizeof(float))) ); cutilSafeCall( hipMalloc((void**) &(dDensityDot), (MAXP * sizeof(float))) ); cutilSafeCall( hipMalloc((void**) &(dEnergyDot), (MAXP * sizeof(float))) ); cutilSafeCall( hipMalloc((void**) &(dList), (MAXP * MAXN * sizeof(int))) ); cutilSafeCall( hipMalloc((void**) &(dHash), (MAXP * sizeof(int))) ); cutilSafeCall( hipMalloc((void**) &(dIndex), (MAXP * sizeof(int))) ); cutilSafeCall( hipMalloc((void**) &(dSetStart), (MAXG * sizeof(int))) ); cutilSafeCall( hipMalloc((void**) &(dSetStop), (MAXG * sizeof(int))) ); cutilSafeCall( hipMalloc((void**) &(dIntDummy), (MAXP * sizeof(int))) ); cutilSafeCall( hipMalloc((void**) &(dFloatDummy), (MAXP * sizeof(float))) ); dGrid.oX = hGrid.oX; dGrid.oY = hGrid.oY; dGrid.nX = hGrid.nX; dGrid.nY = hGrid.nY; dGrid.size = hGrid.size; dGrid.SN = hGrid.SN; cutilSafeCall( hipMalloc((void**) &(dGrid.set), (MAXG * sizeof(int))) ); cutilSafeCall( hipMalloc((void**) &(dGrid.nump), (MAXG * sizeof(int))) ); cutilSafeCall( hipMalloc((void**) &(dGrid.cell), (MAXG * sizeof(int))) ); cutilSafeCall( hipMalloc((void**) &(dGrid.perm), (MAXP * sizeof(int))) ); cutilSafeCall( hipMalloc((void**) &(dPosX0), (MAXP * sizeof(float))) ); cutilSafeCall( hipMalloc((void**) &(dPosY0), (MAXP * sizeof(float))) ); cutilSafeCall( hipMalloc((void**) &(dVelX0), (MAXP * sizeof(float))) ); cutilSafeCall( hipMalloc((void**) &(dVelY0), (MAXP * sizeof(float))) ); cutilSafeCall( hipMalloc((void**) &(dDensity0), (MAXP * sizeof(float))) ); cutilSafeCall( hipMalloc((void**) &(dEnergy0), (MAXP * sizeof(float))) ); cutilSafeCall( hipMemcpyToSymbol("dPN", &hPN, sizeof(int)) ); cutilSafeCall( hipMemcpyToSymbol("dSmooth", &hSmooth, sizeof(float)) ); cutilSafeCall( hipMemcpyToSymbol("dMass", &hMass, sizeof(float)) ); cutilSafeCall( hipMemcpyToSymbol("dSound", &hSound, sizeof(float)) ); cutilSafeCall( hipMemcpyToSymbol("dRun", &hRun, sizeof(struct simulation)) ); cutilSafeCall( 
hipMemcpyToSymbol("dMatType", hMatType, 10 * sizeof(int)) ); cutilSafeCall( hipMemcpyToSymbol("dMatProp", hMatProp, 100 * sizeof(float)) ); return 0; } int initCUDPP() { CUDPPConfiguration sortConfig; sortConfig.algorithm = CUDPP_SORT_RADIX; sortConfig.datatype = CUDPP_UINT; sortConfig.op = CUDPP_ADD; sortConfig.options = CUDPP_OPTION_KEY_VALUE_PAIRS; cudppPlan(&hSortHandle, sortConfig, hPN, 1, 0); return 0; } int copyHostToDevice() { cutilSafeCall( hipMemcpy(dMaterial, hMaterial, (MAXP * sizeof(int)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dPosX, hPosX, (MAXP * sizeof(float)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dPosY, hPosY, (MAXP * sizeof(float)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dVelX, hVelX, (MAXP * sizeof(float)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dVelY, hVelY, (MAXP * sizeof(float)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dDensity, hDensity, (MAXP * sizeof(float)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dEnergy, hEnergy, (MAXP * sizeof(float)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dPressure, hPressure, (MAXP * sizeof(float)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dVelDotX, hVelDotX, (MAXP * sizeof(float)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dVelDotY, hVelDotY, (MAXP * sizeof(float)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dDensityDot, hDensityDot, (MAXP * sizeof(float)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dEnergyDot, hEnergyDot, (MAXP * sizeof(float)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dList, hList, (MAXP * MAXN * sizeof(int)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dHash, hHash, (MAXP * sizeof(int)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dIndex, hIndex, (MAXP * sizeof(int)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dSetStart, hSetStart, (MAXG * sizeof(int)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dSetStop, hSetStop, (MAXG * sizeof(int)), hipMemcpyHostToDevice) ); dGrid.oX = hGrid.oX; dGrid.oY = hGrid.oY; dGrid.nX = hGrid.nX; dGrid.nY = hGrid.nY; dGrid.size = hGrid.size; dGrid.SN = hGrid.SN; cutilSafeCall( hipMemcpy(dGrid.set, hGrid.set, (MAXG * sizeof(int)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dGrid.nump, hGrid.nump, (MAXG * sizeof(int)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dGrid.cell, hGrid.cell, (MAXG * sizeof(int)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dGrid.perm, hGrid.perm, (MAXP * sizeof(int)), hipMemcpyHostToDevice) ); return 0; } int copyDeviceToHost() { cutilSafeCall( hipMemcpy(hMaterial, dMaterial, (MAXP * sizeof(int)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hPosX, dPosX, (MAXP * sizeof(float)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hPosY, dPosY, (MAXP * sizeof(float)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hVelX, dVelX, (MAXP * sizeof(float)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hVelY, dVelY, (MAXP * sizeof(float)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hDensity, dDensity, (MAXP * sizeof(float)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hEnergy, dEnergy, (MAXP * sizeof(float)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hPressure, dPressure, (MAXP * sizeof(float)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hVelDotX, dVelDotX, (MAXP * sizeof(float)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hVelDotY, dVelDotY, (MAXP * sizeof(float)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hDensityDot, dDensityDot, 
(MAXP * sizeof(float)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hEnergyDot, dEnergyDot, (MAXP * sizeof(float)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hList, dList, (MAXP * MAXN * sizeof(int)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hHash, dHash, (MAXP * sizeof(int)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hIndex, dIndex, (MAXP * sizeof(int)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hSetStart, dSetStart, (MAXG * sizeof(int)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hSetStop, dSetStop, (MAXG * sizeof(int)), hipMemcpyDeviceToHost) ); return 0; } int backupData() { cutilSafeCall( hipMemcpy(dPosX0, dPosX, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice) ); cutilSafeCall( hipMemcpy(dPosY0, dPosY, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice) ); cutilSafeCall( hipMemcpy(dVelX0, dVelX, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice) ); cutilSafeCall( hipMemcpy(dVelY0, dVelY, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice) ); cutilSafeCall( hipMemcpy(dDensity0, dDensity, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice) ); cutilSafeCall( hipMemcpy(dEnergy0, dEnergy, (MAXP * sizeof(float)), hipMemcpyDeviceToDevice) ); return 0; } int initRun() { /** * \brief Input run data * * Reads the input file for run data * * \date Oct 21, 2010 * \author Luca Massidda */ FILE *stream; char tok[10]; int i, m, p, pn; int iv; float fv; int mpn, mpp[10]; // Open stream file stream = fopen("armando.run", "r"); while (!feof(stream)) { sprintf(tok, " "); fscanf(stream, "%s", tok); if (strcmp(tok, "MAT") == 0) { fscanf(stream, "%i", &iv); if ((iv > 0) && (iv <= 50)) m = iv; for (p = 0; p < 10; p++) hMatProp[m][p] = 0.0; if ((m > 0) && (m <= 10)) pn = 3; if ((m > 10) && (m <= 20)) pn = 9; if ((m > 20) && (m <= 30)) pn = 10; if ((m > 30) && (m <= 40)) pn = 5; if ((m > 40) && (m <= 50)) pn = 3; for (p = 0; p < pn; p++) { fscanf(stream, "%f", &fv); hMatProp[m][p] = fv; } printf("Material %d\n", m); printf("hMatProp: \n"); for (p = 0; p < pn; p++) printf(" %f\n", hMatProp[m][p]); printf("\n"); } if (strcmp(tok, "TIME") == 0) { fscanf(stream, "%f", &fv); if (fv > 0.0) hRun.dt = fv; fscanf(stream, "%i", &iv); if (iv > 0) hRun.tsn = iv; fscanf(stream, "%i", &iv); if (iv > 0) hRun.ssi = iv; printf("Time step: %f\n", hRun.dt); printf("Steps: %i\n", hRun.tsn); printf("Save step: %i\n", hRun.ssi); printf("\n"); } if (strcmp(tok, "LIMITS") == 0) { fscanf(stream, "%f", &fv); hRun.minX = fv; fscanf(stream, "%f", &fv); hRun.maxX = fv; fscanf(stream, "%f", &fv); hRun.minY = fv; fscanf(stream, "%f", &fv); hRun.maxY = fv; printf("Domain limits: \n"); printf("X: %+e - %+e \n", hRun.minX, hRun.maxX); printf("Y: %+e - %+e \n", hRun.minY, hRun.maxY); printf("\n"); } if (strcmp(tok, "MONITORS") == 0) { fscanf(stream, "%i", &iv); mpn = iv; for (i = 0; i < mpn; i++) { fscanf(stream, "%i", &iv); mpp[i] = iv; } printf("Monitored particles: %i \n", mpn); if (mpn > 0) { printf("Index:"); for (i = 0; i < mpn; i++) printf(" %i", mpp[i]); printf("\n"); printf("\n"); } } } fclose(stream); hSound = hSmooth / hRun.dt; return 0; } int scanData() { /** * \brief Input particle data file * * Reads particle data from a disk file * * \date Oct 20, 2010 * \author Luca Massidda */ FILE *stream; int i; float fv1, fv2, fv3; int iv; // Stream file position stream = fopen("in_pos.txt", "r"); for (i = 0; !feof(stream); i++) { fscanf(stream, "%e %e ", &fv1, &fv2); hPosX[i] = fv1; hPosY[i] = fv2; } fclose(stream); hPN = i; // Stream file velocity stream = fopen("in_vel.txt", "r"); for (i = 0; i < 
hPN; i++) { fscanf(stream, "%e %e", &fv1, &fv2); hVelX[i] = fv1; hVelY[i] = fv2; } fclose(stream); // Stream file info stream = fopen("in_info.txt", "r"); for (i = 0; i < hPN; i++) { fscanf(stream, "%i %e %e ", &iv, &fv1, &fv2); hMaterial[i] = iv; hMass = fv1; hSmooth = fv2; } fclose(stream); // Stream file field stream = fopen("in_field.txt", "r"); for (i = 0; i < hPN; i++) { fscanf(stream, "%e %e %e ", &fv1, &fv2, &fv3); hDensity[i] = fv1; hPressure[i] = fv2; hEnergy[i] = fv3; } fclose(stream); return 0; } int printData() { /** * \brief Particle data file output * * Saves particle data on a disk file * * \date Oct 21, 2010 * \author Luca Massidda */ FILE *stream; int i; // Stream file position stream = fopen("out_pos.txt", "w"); for (i = 0; i < hPN; i++) fprintf(stream, "%+14.8e %+14.8e\n", hPosX[i], hPosY[i]); fclose(stream); // Stream file velocity stream = fopen("out_vel.txt", "w"); for (i = 0; i < hPN; i++) fprintf(stream, "%+14.8e %+14.8e \n", hVelX[i], hVelY[i]); fclose(stream); // Stream file info stream = fopen("out_info.txt", "w"); for (i = 0; i < hPN; i++) fprintf(stream, "%i %+14.8e %+14.8e \n", hMaterial[i], hMass, hSmooth); fclose(stream); // Stream file field stream = fopen("out_field.txt", "w"); for (i = 0; i < hPN; i++) fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hDensity[i], hPressure[i], hEnergy[i]); fclose(stream); // Stream file add1 stream = fopen("out_debug.txt", "w"); for (i = 0; i < hPN; i++) fprintf(stream, "%+14.8e %+14.8e %+14.8e %+14.8e \n", hDensityDot[i], hVelDotX[i], hVelDotY[i], hEnergyDot[i]); fclose(stream); return 0; } int outputCase() { /** * \brief Output Case file * * Saves ensight case file * * \date Jul 5, 2010 * \author Luca Massidda */ FILE *stream; int ts; // Open stream file stream = fopen("armando.case", "w"); fprintf(stream, "# Ensight formatted case file for Armando\n"); fprintf(stream, "\n"); fprintf(stream, "FORMAT\n"); fprintf(stream, "type: ensight gold\n"); fprintf(stream, "\n"); fprintf(stream, "GEOMETRY\n"); fprintf(stream, "model: 1 armando_pos_*****.geo\n"); fprintf(stream, "\n"); fprintf(stream, "VARIABLE\n"); fprintf(stream, "vector per node: 1 velocity armando_vel_*****.dat\n"); fprintf(stream, "scalar per node: 1 density armando_rho_*****.dat\n"); fprintf(stream, "scalar per node: 1 pressure armando_pre_*****.dat\n"); fprintf(stream, "scalar per node: 1 energy armando_ene_*****.dat\n"); fprintf(stream, "\n"); fprintf(stream, "TIME\n"); fprintf(stream, "time set: %i\n", 1); fprintf(stream, "number of steps: %i\n", (hRun.tsn / hRun.ssi + 1)); fprintf(stream, "filename start number: %i\n", 0); fprintf(stream, "filename increment: %i\n", 1); fprintf(stream, "time values:\n"); for (ts = 0; ts <= hRun.tsn; ts++) if ((ts % hRun.ssi) == 0) fprintf(stream, "%14.8e\n", (ts * hRun.dt)); // Close stream file fclose(stream); return 0; } int outputData(int ss) { /** * \brief Output Data file * * Saves ensight data file * * \date Oct 21, 2010 * \author Luca Massidda */ FILE *stream; char filename[80]; int i; // Stream position file sprintf(filename, "armando_pos_%05d.geo", ss); stream = fopen(filename, "w"); fprintf(stream, "Armando output in EnSight Gold format\n"); fprintf(stream, "EnSight 8.0.7\n"); fprintf(stream, "node id assign\n"); fprintf(stream, "element id assign\n"); fprintf(stream, "extents\n"); fprintf(stream, " 1.00000e+38-1.00000e+38\n"); fprintf(stream, " 1.00000e+38-1.00000e+38\n"); fprintf(stream, " 1.00000e+38-1.00000e+38\n"); fprintf(stream, "part\n"); fprintf(stream, "%10i\n", 1); fprintf(stream, "SPH particles\n"); 
fprintf(stream, "coordinates\n"); fprintf(stream, "%10i\n", hPN); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", hPosX[i]); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", hPosY[i]); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", 0.0); fclose(stream); // Stream velocity file sprintf(filename, "armando_vel_%05d.dat", ss); stream = fopen(filename, "w"); fprintf(stream, "particle velocity in EnSight Gold format\n"); fprintf(stream, "part\n"); fprintf(stream, "%10i\n", 1); fprintf(stream, "coordinates\n"); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", hVelX[i]); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", hVelY[i]); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", 0.0); fclose(stream); // Stream density file sprintf(filename, "armando_rho_%05d.dat", ss); stream = fopen(filename, "w"); fprintf(stream, "particle density in EnSight Gold format\n"); fprintf(stream, "part\n"); fprintf(stream, "%10i\n", 1); fprintf(stream, "coordinates\n"); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", hDensity[i]); fclose(stream); // Stream pressure file sprintf(filename, "armando_pre_%05d.dat", ss); stream = fopen(filename, "w"); fprintf(stream, "particle pressure in EnSight Gold format\n"); fprintf(stream, "part\n"); fprintf(stream, "%10i\n", 1); fprintf(stream, "coordinates\n"); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", hPressure[i]); fclose(stream); // Stream energy file sprintf(filename, "armando_ene_%05d.dat", ss); stream = fopen(filename, "w"); fprintf(stream, "particle energy in EnSight Gold format\n"); fprintf(stream, "part\n"); fprintf(stream, "%10i\n", 1); fprintf(stream, "coordinates\n"); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", hEnergy[i]); fclose(stream); return 0; } void initDamBreak() { int i, j, m, pi; double rho, c0, pmin; double dr; m = 1; rho = 1000.; c0 = 50.; pmin = -1.e12; hMatType[m] = 4; hMatProp[m][0] = rho; hMatProp[m][1] = c0; hMatProp[m][2] = pmin; dr = 0.02; // x4 pi = 0; for (j = 0; j <= 50; j++) { for (i = 0; i <= 100; i++) { hPosX[pi] = i * dr + 0.5 * dr; hPosY[pi] = j * dr + 0.8 * dr; hVelX[pi] = 0.0; hVelY[pi] = 0.0; hMaterial[pi] = m; hDensity[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hEnergy[pi] = 0.0; hPressure[pi] = 0.0; pi++; } } // 0 - 268 0 - 150 /* for (j = 151; j <= 153; j++) { for (i = -3; i <= 271; i++) { hPosX[pi] = i * dr; hPosY[pi] = j * dr; hVelX[pi] = 0.0; hVelY[pi] = 0.0; hMaterial[pi] = -m; hDensity[pi] = rho; // + (9.81 * rho / c0 / c0 * (50 - j) * dr); hEnergy[pi] = 0.0; hPressure[pi] = 0.0; pi++; } } */ for (j = -3; j <= -1; j++) { for (i = -3; i <= 271; i++) { hPosX[pi] = i * dr; hPosY[pi] = j * dr; hVelX[pi] = 0.0; hVelY[pi] = 0.0; hMaterial[pi] = -m; hDensity[pi] = rho; // + (9.81 * rho / c0 / c0 * (50 - j) * dr); hEnergy[pi] = 0.0; hPressure[pi] = 0.0; pi++; } } for (j = -0; j <= 80; j++) { for (i = -3; i <= -1; i++) { hPosX[pi] = i * dr; hPosY[pi] = j * dr; hVelX[pi] = 0.0; hVelY[pi] = 0.0; hMaterial[pi] = -m; hDensity[pi] = rho; // + (9.81 * rho / c0 / c0 * (50 - j) * dr); hEnergy[pi] = 0.0; hPressure[pi] = 0.0; pi++; } } for (j = -0; j <= 80; j++) { for (i = 269; i <= 271; i++) { hPosX[pi] = i * dr; hPosY[pi] = j * dr; hVelX[pi] = 0.0; hVelY[pi] = 0.0; hMaterial[pi] = -m; hDensity[pi] = rho; // + (9.81 * rho / c0 / c0 * (50 - j) * dr); hEnergy[pi] = 0.0; hPressure[pi] = 0.0; pi++; } } hPN = pi; hSmooth = 1.2 * dr; hMass = rho * dr * dr; hSound = c0; hRun.minX = -1.0; hRun.maxX = 6.0; hRun.minY = -1.0; hRun.maxY = 4.0; hRun.dt = 4.0e-4; //1.0e-3; hRun.tsn = 4000; //1000; hRun.ssi = 200; 
hGrid.oX = hRun.minX; hGrid.oY = hRun.minY; hGrid.size = 2.0 * hSmooth; hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1; hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1; printf("Dam break in a box \n"); printf("Particles: %i \n", hPN); } void initFree() { int i, j, m, pi; double rho, c0, pmin; double dr; m = 1; rho = 1000.; c0 = 50.; pmin = -1.e12; hMatType[m] = 4; hMatProp[m][0] = rho; hMatProp[m][1] = c0; hMatProp[m][2] = pmin; dr = 0.1; // x4 pi = 0; for (j = 0; j < 10; j++) { for (i = 0; i < 10; i++) { hPosX[pi] = i * dr + 0.0 * dr; hPosY[pi] = j * dr + 0.0 * dr; hVelX[pi] = 0.0; hVelY[pi] = 0.0; hMaterial[pi] = m; hDensity[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hEnergy[pi] = 0.0; pi++; } } hPN = pi; hSmooth = 1.2 * dr; hMass = rho * dr * dr; hSound = c0; hRun.minX = -0.5; hRun.maxX = 1.5; hRun.minY = -0.5; hRun.maxY = 1.5; hRun.dt = 0.5e-2; //1.0e-3; hRun.tsn = 2; //1000; hRun.ssi = 1; hGrid.oX = hRun.minX; hGrid.oY = hRun.minY; hGrid.size = 2.1 * hSmooth; hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1; hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1; printf("Freefall\n"); printf("Particles: %i \n", hPN); } int iSort(int *array, int *perm, int n) { int i; static int* dummy = NULL; if (!dummy) dummy = (int *) malloc(MAXP * sizeof(int)); for (i = 0; i < n; i++) dummy[i] = array[i]; for (i = 0; i < n; i++) array[i] = dummy[perm[i]]; return 0; } int fSort(float *array, int *perm, int n) { int i; static float* dummy = NULL; if (!dummy) dummy = (float *) malloc(MAXP * sizeof(float)); for (i = 0; i < n; i++) dummy[i] = array[i]; for (i = 0; i < n; i++) array[i] = dummy[perm[i]]; return 0; } int updateGrid(void) { int i, j, ix, iy; int maxnump; cutilSafeCall( hipMemcpy(hPosX, dPosX, (MAXP * sizeof(float)), hipMemcpyDeviceToHost) ); cutilSafeCall( hipMemcpy(hPosY, dPosY, (MAXP * sizeof(float)), hipMemcpyDeviceToHost) ); // Set to zero the index vector of the grid data structure for (i = 0; i <= hGrid.nX * hGrid.nY; i++) { hGrid.set[i] = 0; hGrid.nump[i] = 0; } // The index vector is used to store the number of particles // in each grid cell for (i = 0; i < hPN; i++) { ix = (int) ((hPosX[i] - hGrid.oX) / hGrid.size); iy = (int) ((hPosY[i] - hGrid.oY) / hGrid.size); hGrid.nump[ix + iy * hGrid.nX]++; } // The index vector points at the beginning of the particle list // in the grid data structure hGrid.set[0] = 0; for (i = 1; i < hGrid.nX * hGrid.nY; i++) { hGrid.set[i] = hGrid.set[i -1] + hGrid.nump[i -1]; } // The data vector for particles is filled for (i = 0; i < hPN; i++) { ix = (int) ((hPosX[i] - hGrid.oX) / hGrid.size); iy = (int) ((hPosY[i] - hGrid.oY) / hGrid.size); j = hGrid.set[ix + iy * hGrid.nX]; hGrid.perm[j] = i; hGrid.set[ix + iy * hGrid.nX]++; } // The index vector points at the beginning of the particle list // in the grid data structure hGrid.set[0] = 0; for (i = 1; i < hGrid.nX * hGrid.nY; i++) { hGrid.set[i] = hGrid.set[i -1] + hGrid.nump[i -1]; } // The cell vector points at the grid position hGrid.cell[0] = 0; j = 0; for (i = 0; i < hGrid.nX * hGrid.nY; i++) { if (hGrid.nump[i] > 0) { hGrid.cell[j] = i; j++; } } hGrid.SN = j; maxnump = 0; for (i = 0; i < hGrid.nX * hGrid.nY; i++) if (hGrid.nump[i] > maxnump) maxnump = hGrid.nump[i]; if (maxnump > ParticlesInSet) printf("Error: Particles in cell limit exceeded. 
%d > %d\n", maxnump, ParticlesInSet); //printf("Debug: maxnump %d\n", maxnump); dGrid.oX = hGrid.oX; dGrid.oY = hGrid.oY; dGrid.nX = hGrid.nX; dGrid.nY = hGrid.nY; dGrid.size = hGrid.size; dGrid.SN = hGrid.SN; cutilSafeCall( hipMemcpy(dGrid.set, hGrid.set, (MAXG * sizeof(int)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dGrid.nump, hGrid.nump, (MAXG * sizeof(int)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dGrid.cell, hGrid.cell, (MAXG * sizeof(int)), hipMemcpyHostToDevice) ); cutilSafeCall( hipMemcpy(dGrid.perm, hGrid.perm, (MAXP * sizeof(int)), hipMemcpyHostToDevice) ); return 0; } int sortArrays(void) { int blocks, threads; threads = THREADS; blocks = (hPN + threads - 1) / threads; // Particles are re ordered hipLaunchKernelGGL(( kerSortInt) , dim3(blocks), dim3(threads) , 0, 0, dMaterial, dIndex, dIntDummy); hipLaunchKernelGGL(( kerSortFloat) , dim3(blocks), dim3(threads) , 0, 0, dPosX, dIndex, dFloatDummy); hipLaunchKernelGGL(( kerSortFloat) , dim3(blocks), dim3(threads) , 0, 0, dPosY, dIndex, dFloatDummy); hipLaunchKernelGGL(( kerSortFloat) , dim3(blocks), dim3(threads) , 0, 0, dVelX, dIndex, dFloatDummy); hipLaunchKernelGGL(( kerSortFloat) , dim3(blocks), dim3(threads) , 0, 0, dVelY, dIndex, dFloatDummy); hipLaunchKernelGGL(( kerSortFloat) , dim3(blocks), dim3(threads) , 0, 0, dDensity, dIndex, dFloatDummy); hipLaunchKernelGGL(( kerSortFloat) , dim3(blocks), dim3(threads) , 0, 0, dEnergy, dIndex, dFloatDummy); hipLaunchKernelGGL(( kerSortFloat) , dim3(blocks), dim3(threads) , 0, 0, dPressure, dIndex, dFloatDummy); /* iSort(hMaterial, hGrid.perm, hPN); fSort(hPosX, hGrid.perm, hPN); fSort(hPosY, hGrid.perm, hPN); fSort(hVelX, hGrid.perm, hPN); fSort(hVelY, hGrid.perm, hPN); fSort(hDensity, hGrid.perm, hPN); fSort(hEnergy, hGrid.perm, hPN); fSort(hPressure, hGrid.perm, hPN); fSort(hVelDotX, hGrid.perm, hPN); fSort(hVelDotY, hGrid.perm, hPN); fSort(hDensityDot, hGrid.perm, hPN); fSort(hEnergyDot, hGrid.perm, hPN); */ return 0; } int updateList(void) { int ip, ic, ix, iy, il, i, j, jp, jc; float dx, dy, dr; // Particles list is filled for (ip = 0; ip < hPN; ip++) { hList[ip] = 0; for (il = 1; il < MAXN; il++) { hList[ip + il * MAXP] = ip; } ix = (int) ((hPosX[ip] - hGrid.oX) / hGrid.size); iy = (int) ((hPosY[ip] - hGrid.oY) / hGrid.size); ic = ix + iy * hGrid.nX; /* for (jp = 0; jp < hPN; jp++) { dx = hPosX[ip] - hPosX[jp]; dy = hPosY[ip] - hPosY[jp]; dr = sqrtf(dx * dx + dy * dy); if ((dr < 2*hSmooth) && (hList[ip] < MAXN -1)) { hList[ip]++; hList[ip + hList[ip] * MAXP] = jp; } } */ for (j = -1; j <= 1; j++) { for (i = -1; i <= 1; i++) { jc = ic + i + j * hGrid.nX; for (il = 0; il < hGrid.nump[jc]; il++) { jp = hGrid.perm[hGrid.set[jc] + il]; dx = hPosX[ip] - hPosX[jp]; dy = hPosY[ip] - hPosY[jp]; dr = sqrtf(dx * dx + dy * dy); if ((dr < 2.0 * hSmooth) && (hList[ip] < MAXN -1)) { hList[ip]++; hList[ip + hList[ip] * MAXP] = jp; } } } } } /* printf("hGrid\n"); for (ic = 0; ic < hGrid.nX * hGrid.nY; ic++) printf("cell %d : %d\n", ic, hGrid.cell[ic]); for (ic = 0; ic < hGrid.nX * hGrid.nY; ic++) printf("set %d : %d\n", ic, hGrid.set[ic]); for (ic = 0; ic < hGrid.nX * hGrid.nY; ic++) printf("nump %d : %d\n", ic, hGrid.nump[ic]); for (ip = 0; ip < hPN; ip++) printf("perm %d : %d\n", ip, hGrid.perm[ip]); for (ic = 0; ic < hGrid.nX * hGrid.nY; ic++) if (hGrid.nump[ic] >0) { printf("%d ", hGrid.nump[ic]); for (il = 0; il < hGrid.nump[ic]; il++) { printf("%d ", hGrid.set[ic] + il); } printf("\n"); } printf("\n"); for (ip = 0; ip < hPN; ip++) { for (il = 0; il < MAXN; il++) { 
printf("%d ", hList[ip + il * MAXP]); } printf("\n"); } printf("\n"); */ cutilSafeCall( hipMemcpy(dList, hList, (MAXP * MAXN * sizeof(int)), hipMemcpyHostToDevice) ); return 0; } int integrateRungeKutta3(void) { /** * \brief Runge Kutta 3rd order time integration * * Integrate the Navier Stokes equations in time with the * Total Variation Diminishing Runge-Kutta algorithm of the 3rd order * * \date Dec 20, 2010 * \author Luca Massidda */ int ts; // Output outputCase(); copyHostToDevice(); int blocks1 = (hPN + THREADS - 1) / THREADS; int threads1 = THREADS; // TIME CYCLE for (ts = 0; ts <= hRun.tsn; ts++) { // Output data if ((ts % hRun.ssi) == 0) { printf("Saving time: %g \n", ts * hRun.dt); copyDeviceToHost(); printData(); outputData(ts / hRun.ssi); } // ******* TO DO ******* // storing the monitor outputs in vectors greatly speeds up execution // and allows a much more effective parallelization!!! //output_monitor(pn, p0, dt * ts, mpn, mpp); // Calculate neighbouring particles hipLaunchKernelGGL(( kerHash) , dim3(blocks1), dim3(THREADS) , 0, 0, dGrid, dPosX, dPosY, dHash, dIndex); cudppSort(hSortHandle, dHash, dIndex, 18, hPN); cutilSafeCall( hipMemset(dSetStart, -1, MAXG * sizeof(int))); cutilSafeCall( hipMemset(dSetStop, -1, MAXG * sizeof(int))); hipLaunchKernelGGL(( kerGrid) , dim3(blocks1), dim3(THREADS) , 0, 0, dSetStart, dSetStop, dHash); sortArrays(); hipLaunchKernelGGL(( kerList) , dim3(blocks1), dim3(THREADS) , 0, 0, dList, dSetStart, dSetStop, dGrid, dPosX, dPosY); backupData(); // Step 1 // External forces hipLaunchKernelGGL(( updateForces) , dim3(blocks1), dim3(threads1) , 0, 0, dMaterial, dVelDotX, dVelDotY, dDensityDot, dEnergyDot); // Calculate particle interactions hipLaunchKernelGGL(( kerInteraction) , dim3(blocks1), dim3(threads1) , 0, 0, dList, dPosX, dPosY, dVelX, dVelY, dDensity, dPressure, dDensityDot, dVelDotX, dVelDotY); hipLaunchKernelGGL(( balanceEnergy) , dim3(blocks1), dim3(threads1) , 0, 0, dPressure, dDensity, dDensityDot, dEnergyDot); // Update particles hipLaunchKernelGGL(( kerUpdate) , dim3(blocks1), dim3(threads1) , 0, 0, dMaterial, dVelDotX, dVelDotY, dDensityDot, dEnergyDot, 1.0, dPosX0, dPosY0, dVelX0, dVelY0, dDensity0, dEnergy0, dPosX, dPosY, dVelX, dVelY, dDensity, dEnergy, dPressure); // Step 2 // External forces hipLaunchKernelGGL(( updateForces) , dim3(blocks1), dim3(threads1) , 0, 0, dMaterial, dVelDotX, dVelDotY, dDensityDot, dEnergyDot); // Calculate particle interactions hipLaunchKernelGGL(( kerInteraction) , dim3(blocks1), dim3(threads1) , 0, 0, dList, dPosX, dPosY, dVelX, dVelY, dDensity, dPressure, dDensityDot, dVelDotX, dVelDotY); hipLaunchKernelGGL(( balanceEnergy) , dim3(blocks1), dim3(threads1) , 0, 0, dPressure, dDensity, dDensityDot, dEnergyDot); // Update particles hipLaunchKernelGGL(( kerUpdate) , dim3(blocks1), dim3(threads1) , 0, 0, dMaterial, dVelDotX, dVelDotY, dDensityDot, dEnergyDot, 1.0 / 4.0, dPosX0, dPosY0, dVelX0, dVelY0, dDensity0, dEnergy0, dPosX, dPosY, dVelX, dVelY, dDensity, dEnergy, dPressure); // Step 3 // External forces hipLaunchKernelGGL(( updateForces) , dim3(blocks1), dim3(threads1) , 0, 0, dMaterial, dVelDotX, dVelDotY, dDensityDot, dEnergyDot); // Calculate particle interactions hipLaunchKernelGGL(( kerInteraction) , dim3(blocks1), dim3(threads1) , 0, 0, dList, dPosX, dPosY, dVelX, dVelY, dDensity, dPressure, dDensityDot, dVelDotX, dVelDotY); hipLaunchKernelGGL(( balanceEnergy) , dim3(blocks1), dim3(threads1) , 0, 0, dPressure, dDensity, dDensityDot, dEnergyDot); // Update particles hipLaunchKernelGGL((
kerUpdate) , dim3(blocks1), dim3(threads1) , 0, 0, dMaterial, dVelDotX, dVelDotY, dDensityDot, dEnergyDot, 2.0 / 3.0, dPosX0, dPosY0, dVelX0, dVelY0, dDensity0, dEnergy0, dPosX, dPosY, dVelX, dVelY, dDensity, dEnergy, dPressure); } cutilSafeCall( hipDeviceReset() ); return 0; } int main() { /** * \brief armando2D v2.0 * * An SPH code for non stationary fluid dynamics. * This is the reviewed and improved C version of Armando v1.0 * developed at CERN in 2008 * * \date Oct 20, 2010 * \author Luca Massidda */ initHost(); initDamBreak(); //initFree(); initDevice(); initCUDPP(); integrateRungeKutta3(); return 0; }
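/*
 * Illustrative sketch, not part of the original Armando sources: a minimal
 * host-side reference of the TVD Runge-Kutta 3 blending that kerUpdate applies
 * inside integrateRungeKutta3 above, using the same stage weights
 * (1.0, 1.0/4.0, 2.0/3.0) passed to the three kerUpdate launches. The helper
 * names rk3HostBlend, rk3HostReference and the test right-hand side decayRate
 * (dy/dt = -y) are assumptions introduced here for illustration only and are
 * never called by the solver.
 */
static float decayRate(float y) {
    // Stand-in rate function, playing the role of the SPH rate kernels
    // (updateForces, kerInteraction, balanceEnergy) for a single scalar.
    return -y;
}

static float rk3HostBlend(float y0, float y, float ydot, float dt, float alpha) {
    // Same per-field blending rule used by kerUpdate:
    //   y <- y0 + alpha * (y + dt * ydot - y0)
    return y0 + alpha * (y + dt * ydot - y0);
}

static float rk3HostReference(float y0, float dt) {
    // Three stages with the weights used in the time loop above.
    float y = y0;
    y = rk3HostBlend(y0, y, decayRate(y), dt, 1.0f);        // Step 1
    y = rk3HostBlend(y0, y, decayRate(y), dt, 1.0f / 4.0f); // Step 2
    y = rk3HostBlend(y0, y, decayRate(y), dt, 2.0f / 3.0f); // Step 3
    return y;
}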
19af756bd4b390004bd2ff5de90f90f3906e80e5.cu
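/*
 * Added reading aid, not part of the original source: the interaction kernels
 * below (kerInteraction, kerInteraction2, kerInteraction0) accumulate, over
 * each particle's neighbour list,
 *
 *   drho_i/dt += m * [(v_i - v_j) . (r_i - r_j)] / |r_ij| * dW/dr
 *   dv_i/dt   += -m * (p_i + p_j) / (rho_i * rho_j) * dW/dr * (r_i - r_j) / |r_ij|
 *
 * plus shock-correction (artificial viscosity) terms for density and momentum,
 * where W is the 2D Wendland kernel implemented in kernelWendland and
 * kernelDerivWendland.
 */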
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <cutil_inline.h> #include <cudpp.h> #define MAXP 10000 #define MAXN 21 #define MAXG 128000 #define THREADS 256 #define ParticlesInSet 12 #define SetsInBlock 20 struct grid { float oX, oY; float size; int nX, nY; int SN; int* set; int* nump; int* cell; int* perm; }; struct simulation { float minX, maxX; float minY, maxY; float dt; int tsn; int ssi; int nsi; }; // Host Variables int *hMaterial; float *hPosX; float *hPosY; float *hVelX; float *hVelY; float *hDensity; float *hEnergy; float *hPressure; float *hVelDotX; float *hVelDotY; float *hDensityDot; float *hEnergyDot; int *hList; int *hHash; int *hIndex; int *hSetStart; int *hSetStop; int hPN; float hSmooth, hMass, hSound; int hMatType[10]; float hMatProp[10][10]; struct simulation hRun; struct grid hGrid; CUDPPHandle hSortHandle; // Device Variables __device__ int *dMaterial; __device__ float *dPosX; __device__ float *dPosY; __device__ float *dVelX; __device__ float *dVelY; __device__ float *dDensity; __device__ float *dEnergy; __device__ float *dPressure; __device__ float *dVelDotX; __device__ float *dVelDotY; __device__ float *dDensityDot; __device__ float *dEnergyDot; __device__ int *dList; __device__ int *dHash; __device__ int *dIndex; __device__ int *dSetStart; __device__ int *dSetStop; __device__ int *dIntDummy; __device__ float *dFloatDummy; __device__ __constant__ int dPN; __device__ __constant__ float dSmooth, dMass, dSound; __device__ __constant__ int dMatType[10]; __device__ __constant__ float dMatProp[10][10]; __device__ __constant__ struct simulation dRun; __device__ struct grid dGrid; __device__ float *dPosX0; __device__ float *dPosY0; __device__ float *dVelX0; __device__ float *dVelY0; __device__ float *dDensity0; __device__ float *dEnergy0; // Device code __device__ float kernelWendland(float r, float h) { float q, alpha, w; /** * \brief Wendland kernel * * \date Feb 8, 2011 * \author Luca Massidda */ q = r / h; // for 3D //alpha = 21.0 / (16.0 * M_PI * h * h * h); // for 2D alpha = 7.0 / (4.0 * M_PI * h * h); w = 0.0; if (q < 2) { w = 1.0 - 0.5*q; w *= w; w *= w; w *= 1.0 + 2.0*q; w *= alpha; } return w; } __device__ float kernelDerivWendland(float r, float h) { float q, alpha, dwdr; /** * \brief Wendland kernel derivative * * \date Feb 8, 2011 * \author Luca Massidda */ q = r / h; // for 3D //alpha = 21.0 / (16.0 * M_PI * h * h * h); // for 2D alpha = 7.0 / (4.0 * M_PI * h * h); dwdr = 0; if (q < 2) { dwdr = 5.0 / 8.0 * q * pow((q - 2.0), 3) ; dwdr *= alpha / h; } return dwdr; } __device__ float kernelGauss(float r, float h) { float r2, q2, h2, alpha, w;//, dwdr; /** * \brief Gauss kernel * * \date Dec 21, 2010 * \author Luca Massidda */ r2 = r * r ; h2 = h * h; q2 = r2 / h2; //alpha = 1.0 / (pow(h, 1) * pow(3.14, 0.5)); alpha = 1.0 / (3.14 * h2); w = 0.0; //dwdr = 0.0; if (q2 < 4.0) { w = alpha * expf(-q2); //dwdr = w * (-2.0 * r / h2); } return w; } __device__ float kernelDerivGauss(float r, float h) { float r2, q2, h2, alpha, w, dwdr; /** * \brief Gauss kernel * * \date Dec 21, 2010 * \author Luca Massidda */ r2 = r * r ; h2 = h * h; q2 = r2 / h2; //alpha = 1.0 / (pow(h, 1) * pow(3.14, 0.5)); alpha = 1.0 / (3.14 * h2); w = 0.0; dwdr = 0.0; if (q2 < 4.0) { w = alpha * expf(-q2); dwdr = w * (-2.0 * r / h2); } return dwdr; } __device__ float pressureGas(int mat ,float rho, float u) { /** * \brief Ideal gas Equation Of State * * p = (k -1) rho u * c = (k(k -1) u)^0.5 * * k = dMatProp[mat][1] * pshift = dMatProp[mat][2] * * \date Jun 10, 
2010 * \author Luca Massidda */ float p; // float c; p = (dMatProp[mat][1] - 1.0) * rho * u; p += dMatProp[mat][2]; // c = sqrtf(dMatProp[mat][1] * (dMatProp[mat][1] - 1.0) * u); return p; } __device__ float pressurePoly(int mat , float rho, float u) { /** * \brief Mie-Gruneisen polynomial Equation Of State * * p = a1 mu + a2 mu^2 + a3 mu^3 + (b0 + b1 mu) rho0 u in compression * p = t1 mu + t2 mu^2 + b0 rho0 u in tension * * rho0 = dMatProp[mat][0]; * a1 = dMatProp[mat][1]; * a2 = dMatProp[mat][2]; * a3 = dMatProp[mat][3]; * b0 = dMatProp[mat][4]; * b1 = dMatProp[mat][5]; * t1 = dMatProp[mat][6]; * t2 = dMatProp[mat][7]; * pmin = dMatProp[mat][8]; * * \date Jun 10, 2010 * \author Luca Massidda */ float mu; float p; // float c; mu = (rho - dMatProp[mat][0]) / dMatProp[mat][0]; if (mu < 0) p = (dMatProp[mat][6] * mu + dMatProp[mat][7] * mu*mu) + (dMatProp[mat][4] * dMatProp[mat][0] * u); else p = (dMatProp[mat][1] * mu + dMatProp[mat][2] * mu*mu + dMatProp[mat][3] * mu*mu*mu) + ((dMatProp[mat][4] + dMatProp[mat][5] * mu) * dMatProp[mat][0] * u); if (p < dMatProp[mat][8]) p = dMatProp[mat][8]; // c = sqrtf(dMatProp[mat][1] / rho); return p; } __device__ float pressureShock(int mat, float rho, float u) { /** * \brief Mie-Gruneisen Shock Hugoniot Equation Of State * * mu = rho / rho0 -1 * g = g * rho0 / rho * ph = (rho0 c0^2 mu (1 + mu)) / (1 - (s0 - 1) * mu)^2 * uh = 1/2 ph/rho0 * (mu / (1 + mu)) * p = ph + g * rho * (u - uh) * * rho0 = dMatProp[mat][0]; * c0 = dMatProp[mat][1]; * g0 = dMatProp[mat][2]; * s0 = dMatProp[mat][3]; * pmin = dMatProp[mat][4]; * * \date Jun 10, 2010 * \author Luca Massidda */ float mu; float p, ph; // float c; mu = (rho - dMatProp[mat][0]) / dMatProp[mat][0]; ph = (dMatProp[mat][0] * powf(dMatProp[mat][1], 2) * mu*(1.0 +mu)) / powf((1.0 - (dMatProp[mat][3] -1.0) * mu), 2); p = ph + dMatProp[mat][2] * dMatProp[mat][0] * (u - (0.5 * ph / dMatProp[mat][0] * (mu / (1.0 + mu)))); if (p < dMatProp[mat][4]) p = dMatProp[mat][4]; // c = dMatProp[mat][1]; return p; } __device__ float pressureTait(int mat, float rho, float u) { /** * \brief Tait Equation Of State * * p = rho0 * c0 * c0 / 7.0 * (powf((rho / rho0), 7) - 1.0); * c = c0; * * rho0 = dMatProp[mat][0]; * c0 = dMatProp[mat][1]; * pmin = dMatProp[mat][2]; * * \date Jun 10, 2010 * \author Luca Massidda */ float p; // float c; p = dMatProp[mat][0] * powf(dMatProp[mat][1], 2) / 7.0 * (powf((rho / dMatProp[mat][0]), 7) - 1.0); if (p < dMatProp[mat][2]) p = dMatProp[mat][2]; // c = dMatProp[mat][1]; return p; } // Global code __global__ void kerInteraction2(const int* dList, const float* dPosX, const float* dPosY, const float* dVelX, const float* dVelY, const float* dDensity, const float* dPressure, float* dDensityDot, float* dVelDotX, float* dVelDotY) { /** * \brief Interate particles * * \date Jan 6, 2011 * \author Luca Massidda */ __shared__ float sPosX[THREADS]; __shared__ float sPosY[THREADS]; __shared__ float sVelX[THREADS]; __shared__ float sVelY[THREADS]; __shared__ float sDensity[THREADS]; __shared__ float sPressure[THREADS]; int ip, il, jp, jt; float iDensityDot; float iVelDotX, iVelDotY, iVelDotZ; float jPosX, jPosY; float jVelX, jVelY; float jDensity, jPressure; volatile float dx, dy, dz, dr, dvr, dwdr, f; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { iDensityDot = 0.0; iVelDotX = 0.0; iVelDotY = 0.0; iVelDotZ = 0.0; sPosX[threadIdx.x] = dPosX[ip]; sPosY[threadIdx.x] = dPosY[ip]; sVelX[threadIdx.x] = dVelX[ip]; sVelY[threadIdx.x] = dVelY[ip]; sDensity[threadIdx.x] = dDensity[ip]; 
sPressure[threadIdx.x] = dPressure[ip]; __syncthreads(); for (il = 1; il < MAXN; il++) { jp = dList[ip + il * MAXP]; jt = jp - blockDim.x * blockIdx.x; if ((jt >= 0) && (jt < THREADS)) { jPosX = sPosX[jt]; jPosY = sPosY[jt]; jVelX = sVelX[jt]; jVelY = sVelY[jt]; jDensity = sDensity[jt]; jPressure = sPressure[jt]; } else { jPosX = dPosX[jp]; jPosY = dPosY[jp]; jVelX = dVelX[jp]; jVelY = dVelY[jp]; jDensity = dDensity[jp]; jPressure = dPressure[jp]; } dx = sPosX[threadIdx.x] - jPosX; dy = sPosY[threadIdx.x] - jPosY; dz = 0.0; dr = sqrtf(dx * dx + dy * dy + dz * dz); if (dr < 0.1 * dSmooth) dr = 100.0 * dSmooth; //dwdr = kernelDerivGauss(dr, dSmooth); dwdr = kernelDerivWendland(dr, dSmooth); dvr = 0.0; dvr += (sPosX[threadIdx.x] - jPosX) * (sVelX[threadIdx.x] - jVelX); dvr += (sPosY[threadIdx.x] - jPosY) * (sVelY[threadIdx.x] - jVelY); iDensityDot += dMass * dvr * dwdr / dr; // Calculate interparticle pressure action f = -(sPressure[threadIdx.x] + jPressure) / (sDensity[threadIdx.x] * jDensity); iVelDotX += dMass * f * dwdr * (sPosX[threadIdx.x] - jPosX) / dr; iVelDotY += dMass * f * dwdr * (sPosY[threadIdx.x] - jPosY) / dr; // Calculate shock correction for mass f = sDensity[threadIdx.x] - jDensity; f *= 2.0 * dSound / (sDensity[threadIdx.x] + jDensity); iDensityDot += dMass * f * dwdr; // Calculate shock correction for momentum if (dvr < 0) f = dvr; else f = 0.0; f *= dSmooth / (dr * dr + 0.01 * dSmooth * dSmooth); f *= 2. * dSound / (sDensity[threadIdx.x] + jDensity); f *= 0.03; iVelDotX += dMass * f * dwdr * (sPosX[threadIdx.x] - jPosX) / dr; iVelDotY += dMass * f * dwdr * (sPosY[threadIdx.x] - jPosY) / dr; } dDensityDot[ip] += iDensityDot; dVelDotX[ip] += iVelDotX; dVelDotY[ip] += iVelDotY; } } __global__ void kerInteraction(const int* dList, const float* dPosX, const float* dPosY, const float* dVelX, const float* dVelY, const float* dDensity, const float* dPressure, float* dDensityDot, float* dVelDotX, float* dVelDotY) { /** * \brief Interate particles * * \date Jan 6, 2011 * \author Luca Massidda */ int ip, il, jp; float iDensityDot; float iVelDotX, iVelDotY, iVelDotZ; volatile float dx, dy, dz, dr, dvr, dwdr, f; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { iDensityDot = 0.0; iVelDotX = 0.0; iVelDotY = 0.0; iVelDotZ = 0.0; for (il = 1; il < MAXN; il++) { jp = dList[ip * MAXN + il]; dx = dPosX[ip] - dPosX[jp]; dy = dPosY[ip] - dPosY[jp]; dz = 0.0; dr = sqrtf(dx * dx + dy * dy + dz * dz); if (dr < 0.1 * dSmooth) dr = 100.0 * dSmooth; //dwdr = kernelDerivGauss(dr, dSmooth); dwdr = kernelDerivWendland(dr, dSmooth); dvr = 0.0; dvr += (dPosX[ip] - dPosX[jp]) * (dVelX[ip] - dVelX[jp]); dvr += (dPosY[ip] - dPosY[jp]) * (dVelY[ip] - dVelY[jp]); iDensityDot += dMass * dvr * dwdr / dr; // Calculate interparticle pressure action f = -(dPressure[ip] + dPressure[jp]) / (dDensity[ip] * dDensity[jp]); iVelDotX += dMass * f * dwdr * (dPosX[ip] - dPosX[jp]) / dr; iVelDotY += dMass * f * dwdr * (dPosY[ip] - dPosY[jp]) / dr; // Calculate shock correction for mass f = dDensity[ip] - dDensity[jp]; f *= 2.0 * dSound / (dDensity[ip] + dDensity[jp]); iDensityDot += dMass * f * dwdr; // Calculate shock correction for momentum if (dvr < 0) f = dvr; else f = 0.0; f *= dSmooth / (dr * dr + 0.01 * dSmooth * dSmooth); f *= 2. 
* dSound / (dDensity[ip] + dDensity[jp]); f *= 0.03; iVelDotX += dMass * f * dwdr * (dPosX[ip] - dPosX[jp]) / dr; iVelDotY += dMass * f * dwdr * (dPosY[ip] - dPosY[jp]) / dr; } dDensityDot[ip] += iDensityDot; dVelDotX[ip] += iVelDotX; dVelDotY[ip] += iVelDotY; } } __global__ void kerInteraction0(const int* dList, const float* dPosX, const float* dPosY, const float* dVelX, const float* dVelY, const float* dDensity, const float* dPressure, float* dDensityDot, float* dVelDotX, float* dVelDotY) { /** * \brief Interate particles * * \date Jan 6, 2011 * \author Luca Massidda */ int ip, il, jp; float iDensityDot; float iVelDotX, iVelDotY, iVelDotZ; volatile float dx, dy, dz, dr, dvr, dwdr, f; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { iDensityDot = 0.0; iVelDotX = 0.0; iVelDotY = 0.0; iVelDotZ = 0.0; for (il = 1; il < dList[ip]; il++) { jp = dList[ip * MAXN + il]; // for (jp = 0; jp < dPN; jp++) { dx = dPosX[ip] - dPosX[jp]; dy = dPosY[ip] - dPosY[jp]; dz = 0.0; dr = sqrtf(dx * dx + dy * dy + dz * dz); //dwdr = kernelDerivGauss(dr, dSmooth); dwdr = kernelDerivWendland(dr, dSmooth); dvr = 0.0; dvr += (dPosX[ip] - dPosX[jp]) * (dVelX[ip] - dVelX[jp]); dvr += (dPosY[ip] - dPosY[jp]) * (dVelY[ip] - dVelY[jp]); if (ip != jp) dvr /= dr; else dvr = 0.0; iDensityDot += dMass * dvr * dwdr; //iDensityDot += dMass * dvr * dwdr * iDensity / sDensity[jt][threadIdx.y]; // Calculate interparticle pressure action f = -(dPressure[ip] + dPressure[jp]) / (dDensity[ip] * dDensity[jp]); if (ip != jp) iVelDotX += dMass * f * dwdr * (dPosX[ip] - dPosX[jp]) / dr; if (ip != jp) iVelDotY += dMass * f * dwdr * (dPosY[ip] - dPosY[jp]) / dr; /* // Calculate shock correction for mass f = dDensity[ip] - dDensity[jp]; f *= 2.0 * dSound / (dDensity[ip] + dDensity[jp]); iDensityDot += dMass * f * dwdr; */ // Calculate shock correction for momentum if (dvr < 0) f = dvr * dr; else f = 0.0; f *= dSmooth / (dr * dr + 0.01 * dSmooth * dSmooth); f *= 2. 
* dSound / (dDensity[ip] + dDensity[jp]); f *= 0.03; if (ip != jp) iVelDotX += dMass * f * dwdr * (dPosX[ip] - dPosX[jp]) / dr; if (ip != jp) iVelDotY += dMass * f * dwdr * (dPosY[ip] - dPosY[jp]) / dr; } dDensityDot[ip] += iDensityDot; dVelDotX[ip] += iVelDotX; dVelDotY[ip] += iVelDotY; } } __global__ void balanceEnergy(const float* dPressure, const float* dDensity, const float* dDensityDot, float* dEnergyDot) { /** * \brief Interate particles * * \date Jan 9, 2011 * \author Luca Massidda */ volatile int ip; float iPressure, iDensity, iDensityDot; float iEnergyDot; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { iPressure = dPressure[ip]; iDensity = dDensity[ip]; iDensityDot = dDensityDot[ip]; iEnergyDot = (iPressure * iDensityDot) / (iDensity * iDensity); dEnergyDot[ip] += iEnergyDot; } } __global__ void shockMomentum(const float* dDensity, const float* dDensityDot, float* dPressure) { /** * \brief Interate particles * * \date Jan 9, 2011 * \author Luca Massidda */ volatile int ip; volatile float iDensity, iDensityDot, iPressure; volatile float iVelDiv; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { iDensity = dDensity[ip]; iDensityDot = dDensityDot[ip]; iPressure = 0; iVelDiv = -iDensityDot / iDensity; if (iVelDiv < 0.0) { iPressure -= 1.0 * dSmooth * iDensity * dSound * iVelDiv; iPressure += 0.5 * dSmooth*dSmooth * iDensity * iVelDiv*iVelDiv; } dPressure[ip] += iPressure; } } __global__ void kerUpdate(const int* dMaterial, const float* dVelDotX, const float* dVelDotY, const float* dDensityDot, const float* dEnergyDot, const float alpha, const float* dPosX0, const float* dPosY0, const float* dVelX0, const float* dVelY0, const float* dDensity0, const float* dEnergy0, float* dPosX, float* dPosY, float* dVelX, float* dVelY, float* dDensity, float* dEnergy, float* dPressure) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip; float f; int iMaterial; float iDensity, iEnergy; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { f = dPosX0[ip] + alpha * (dPosX[ip] + dRun.dt * dVelX[ip] - dPosX0[ip]); /* if (f < dRun.minX) f += dRun.maxX - dRun.minX; if (f > dRun.maxX) f -= dRun.maxX - dRun.minX; */ dPosX[ip] = f; f = dPosY0[ip] + alpha * (dPosY[ip] + dRun.dt * dVelY[ip] - dPosY0[ip]); /* if (f < dRun.minY) f += dRun.maxY - dRun.minY; if (f > dRun.maxY) f -= dRun.maxY - dRun.minY; */ dPosY[ip] = f; f = dVelX0[ip] + alpha * (dVelX[ip] + dRun.dt * dVelDotX[ip] - dVelX0[ip]); dVelX[ip] = f; f = dVelY0[ip] + alpha * (dVelY[ip] + dRun.dt * dVelDotY[ip] - dVelY0[ip]); dVelY[ip] = f; f = dDensity0[ip] + alpha * (dDensity[ip] + dRun.dt * dDensityDot[ip] - dDensity0[ip]); dDensity[ip] = f; f = dEnergy0[ip] + alpha * (dEnergy[ip] + dRun.dt * dEnergyDot[ip] - dEnergy0[ip]); dEnergy[ip] = f; iMaterial = dMaterial[ip]; if (iMaterial < 0) { dVelX[ip] = dVelX0[ip]; dVelY[ip] = dVelY0[ip]; } iMaterial = abs(iMaterial); iDensity = dDensity[ip]; iEnergy = dEnergy[ip]; switch (dMatType[iMaterial]) { case (1) : // IDEAL GAS EOS dPressure[ip] = pressureGas(iMaterial, iDensity, iEnergy); break; case (2) : // MIE-GRUNEISEN POLYNOMIAL EOS dPressure[ip] = pressurePoly(iMaterial, iDensity, iEnergy); break; case (3) : // MIE-GRUNEISEN SHOCK EOS dPressure[ip] = pressureShock(iMaterial, iDensity, iEnergy); break; case (4) : // TAIT EOS dPressure[ip] = pressureTait(iMaterial, iDensity, iEnergy); break; default : dPressure[ip] = 0.0; } } } __global__ void updateForces(const int* dMaterial, float* dVelDotX, float* dVelDotY, float* dDensityDot, float* 
dEnergyDot) { int ip; int iMaterial; float iVelDotX, iVelDotY, iDensityDot, iEnergyDot; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { iVelDotX = 0.0; iVelDotY = 0.0; iDensityDot = 0.0; iEnergyDot = 0.0; iMaterial = dMaterial[ip]; if (iMaterial > 0) iVelDotY = -9.81; dVelDotX[ip] = iVelDotX; dVelDotY[ip] = iVelDotY; dDensityDot[ip] = iDensityDot; dEnergyDot[ip] = iEnergyDot; } } __global__ void updateBoundary(const int* dMaterial, float* dVelDotX, float* dVelDotY) { int ip; int iMaterial; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { iMaterial = dMaterial[ip]; if (iMaterial < 0) { dVelDotX[ip] = 0.0; dVelDotY[ip] = 0.0; } } } __global__ void kerSortInt(int* dArray, const int* dIndex, int* dIntDummy) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip >= dPN) return; dIntDummy[ip] = dArray[ip]; __syncthreads(); dArray[ip] = dIntDummy[dIndex[ip]]; __syncthreads(); } __global__ void kerSortFloat(float* dArray, const int* dIndex, float* dFloatDummy) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip >= dPN) return; dFloatDummy[ip] = dArray[ip]; __syncthreads(); dArray[ip] = dFloatDummy[dIndex[ip]]; __syncthreads(); } __global__ void kerHash(const struct grid dGrid, const float* dPosX, const float* dPosY, int* dHash, int* dIndex) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ int ip, ix, iy, ic; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip < dPN) { ix = (int) ((dPosX[ip] - dGrid.oX) / dGrid.size); iy = (int) ((dPosY[ip] - dGrid.oY) / dGrid.size); ic = ix + iy * dGrid.nX; dHash[ip] = ic; dIndex[ip] = ip; } } __global__ void kerGrid(int *dSetStart, int *dSetStop, const int* dHash) { /** * \brief Update particles * * \date Jan 6, 2010 * \author Luca Massidda */ __shared__ int prevHash[THREADS]; __shared__ int nextHash[THREADS]; int ip, ix, iy; int hash; ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip >= dPN) return; hash = dHash[ip]; if (threadIdx.x < THREADS -1) prevHash[threadIdx.x +1] = hash; if (threadIdx.x == 0) { if (ip == 0) prevHash[threadIdx.x] = -1; else prevHash[threadIdx.x] = dHash[ip -1]; } if (threadIdx.x > 0) nextHash[threadIdx.x -1] = hash; if (threadIdx.x == THREADS -1) { if (ip == dPN -1) nextHash[threadIdx.x] = -1; else nextHash[threadIdx.x] = dHash[ip +1]; } __syncthreads(); if (hash != prevHash[threadIdx.x]) dSetStart[hash] = ip; if (hash != nextHash[threadIdx.x]) dSetStop[hash] = ip +1; } __global__ void kerList(int *dList, const int* dSetStart, const int* dSetStop, const struct grid dGrid, const float* dPosX, const float* dPosY) { int ip, ic, ix, iy, il, i, j, jp, jc, np; float dx, dy, dr; // Particles list is filled ip = threadIdx.x + blockDim.x * blockIdx.x; if (ip >= dPN) return; ix = (int) ((dPosX[ip] - dGrid.oX) / dGrid.size); iy = (int) ((dPosY[ip] - dGrid.oY) / dGrid.size); ic = ix + iy * dGrid.nX; np = 0; for (j = -1; j <= 1; j++) { for (i = -1; i <= 1; i++) { jc = ic + i + j * dGrid.nX; for (jp = dSetStart[jc]; jp < dSetStop[jc]; jp++) { dx = dPosX[ip] - dPosX[jp]; dy = dPosY[ip] - dPosY[jp]; dr = sqrtf(dx * dx + dy * dy); if ((dr < 2.0 * dSmooth) && (np < MAXN)) { dList[ip * MAXN + np] = jp; np++; } } } } while (np < MAXN) { dList[ip * MAXN + np] = ip; np++; } } // Host code int initHost() { hMaterial = (int *) malloc(MAXP * sizeof(int)); hPosX = (float *) malloc(MAXP * sizeof(float)); hPosY = (float *) malloc(MAXP * 
sizeof(float)); hVelX = (float *) malloc(MAXP * sizeof(float)); hVelY = (float *) malloc(MAXP * sizeof(float)); hDensity = (float *) malloc(MAXP * sizeof(float)); hEnergy = (float *) malloc(MAXP * sizeof(float)); hPressure = (float *) malloc(MAXP * sizeof(float)); hVelDotX = (float *) malloc(MAXP * sizeof(float)); hVelDotY = (float *) malloc(MAXP * sizeof(float)); hDensityDot = (float *) malloc(MAXP * sizeof(float)); hEnergyDot = (float *) malloc(MAXP * sizeof(float)); hList = (int *) malloc(MAXP * MAXN * sizeof(int)); hHash = (int *) malloc(MAXP * sizeof(int)); hIndex = (int *) malloc(MAXP * sizeof(int)); hSetStart = (int *) malloc(MAXG * sizeof(int)); hSetStop = (int *) malloc(MAXG * sizeof(int)); hGrid.set = (int *) malloc(MAXG * sizeof(int)); hGrid.nump = (int *) malloc(MAXG * sizeof(int)); hGrid.cell = (int *) malloc(MAXG * sizeof(int)); hGrid.perm = (int *) malloc(MAXP * sizeof(int)); return 0; } int initDevice() { cutilSafeCall( cudaMalloc((void**) &(dMaterial), (MAXP * sizeof(int))) ); cutilSafeCall( cudaMalloc((void**) &(dPosX), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMalloc((void**) &(dPosY), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMalloc((void**) &(dVelX), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMalloc((void**) &(dVelY), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMalloc((void**) &(dDensity), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMalloc((void**) &(dEnergy), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMalloc((void**) &(dPressure), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMalloc((void**) &(dVelDotX), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMalloc((void**) &(dVelDotY), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMalloc((void**) &(dDensityDot), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMalloc((void**) &(dEnergyDot), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMalloc((void**) &(dList), (MAXP * MAXN * sizeof(int))) ); cutilSafeCall( cudaMalloc((void**) &(dHash), (MAXP * sizeof(int))) ); cutilSafeCall( cudaMalloc((void**) &(dIndex), (MAXP * sizeof(int))) ); cutilSafeCall( cudaMalloc((void**) &(dSetStart), (MAXG * sizeof(int))) ); cutilSafeCall( cudaMalloc((void**) &(dSetStop), (MAXG * sizeof(int))) ); cutilSafeCall( cudaMalloc((void**) &(dIntDummy), (MAXP * sizeof(int))) ); cutilSafeCall( cudaMalloc((void**) &(dFloatDummy), (MAXP * sizeof(float))) ); dGrid.oX = hGrid.oX; dGrid.oY = hGrid.oY; dGrid.nX = hGrid.nX; dGrid.nY = hGrid.nY; dGrid.size = hGrid.size; dGrid.SN = hGrid.SN; cutilSafeCall( cudaMalloc((void**) &(dGrid.set), (MAXG * sizeof(int))) ); cutilSafeCall( cudaMalloc((void**) &(dGrid.nump), (MAXG * sizeof(int))) ); cutilSafeCall( cudaMalloc((void**) &(dGrid.cell), (MAXG * sizeof(int))) ); cutilSafeCall( cudaMalloc((void**) &(dGrid.perm), (MAXP * sizeof(int))) ); cutilSafeCall( cudaMalloc((void**) &(dPosX0), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMalloc((void**) &(dPosY0), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMalloc((void**) &(dVelX0), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMalloc((void**) &(dVelY0), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMalloc((void**) &(dDensity0), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMalloc((void**) &(dEnergy0), (MAXP * sizeof(float))) ); cutilSafeCall( cudaMemcpyToSymbol("dPN", &hPN, sizeof(int)) ); cutilSafeCall( cudaMemcpyToSymbol("dSmooth", &hSmooth, sizeof(float)) ); cutilSafeCall( cudaMemcpyToSymbol("dMass", &hMass, sizeof(float)) ); cutilSafeCall( cudaMemcpyToSymbol("dSound", &hSound, sizeof(float)) ); cutilSafeCall( cudaMemcpyToSymbol("dRun", &hRun, sizeof(struct 
simulation)) ); cutilSafeCall( cudaMemcpyToSymbol("dMatType", hMatType, 10 * sizeof(int)) ); cutilSafeCall( cudaMemcpyToSymbol("dMatProp", hMatProp, 100 * sizeof(float)) ); return 0; } int initCUDPP() { CUDPPConfiguration sortConfig; sortConfig.algorithm = CUDPP_SORT_RADIX; sortConfig.datatype = CUDPP_UINT; sortConfig.op = CUDPP_ADD; sortConfig.options = CUDPP_OPTION_KEY_VALUE_PAIRS; cudppPlan(&hSortHandle, sortConfig, hPN, 1, 0); return 0; } int copyHostToDevice() { cutilSafeCall( cudaMemcpy(dMaterial, hMaterial, (MAXP * sizeof(int)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dPosX, hPosX, (MAXP * sizeof(float)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dPosY, hPosY, (MAXP * sizeof(float)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dVelX, hVelX, (MAXP * sizeof(float)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dVelY, hVelY, (MAXP * sizeof(float)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dDensity, hDensity, (MAXP * sizeof(float)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dEnergy, hEnergy, (MAXP * sizeof(float)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dPressure, hPressure, (MAXP * sizeof(float)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dVelDotX, hVelDotX, (MAXP * sizeof(float)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dVelDotY, hVelDotY, (MAXP * sizeof(float)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dDensityDot, hDensityDot, (MAXP * sizeof(float)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dEnergyDot, hEnergyDot, (MAXP * sizeof(float)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dList, hList, (MAXP * MAXN * sizeof(int)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dHash, hHash, (MAXP * sizeof(int)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dIndex, hIndex, (MAXP * sizeof(int)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dSetStart, hSetStart, (MAXG * sizeof(int)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dSetStop, hSetStop, (MAXG * sizeof(int)), cudaMemcpyHostToDevice) ); dGrid.oX = hGrid.oX; dGrid.oY = hGrid.oY; dGrid.nX = hGrid.nX; dGrid.nY = hGrid.nY; dGrid.size = hGrid.size; dGrid.SN = hGrid.SN; cutilSafeCall( cudaMemcpy(dGrid.set, hGrid.set, (MAXG * sizeof(int)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dGrid.nump, hGrid.nump, (MAXG * sizeof(int)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dGrid.cell, hGrid.cell, (MAXG * sizeof(int)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dGrid.perm, hGrid.perm, (MAXP * sizeof(int)), cudaMemcpyHostToDevice) ); return 0; } int copyDeviceToHost() { cutilSafeCall( cudaMemcpy(hMaterial, dMaterial, (MAXP * sizeof(int)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hPosX, dPosX, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hPosY, dPosY, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hVelX, dVelX, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hVelY, dVelY, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hDensity, dDensity, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hEnergy, dEnergy, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hPressure, dPressure, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hVelDotX, dVelDotX, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hVelDotY, dVelDotY, (MAXP * 
sizeof(float)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hDensityDot, dDensityDot, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hEnergyDot, dEnergyDot, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hList, dList, (MAXP * MAXN * sizeof(int)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hHash, dHash, (MAXP * sizeof(int)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hIndex, dIndex, (MAXP * sizeof(int)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hSetStart, dSetStart, (MAXG * sizeof(int)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hSetStop, dSetStop, (MAXG * sizeof(int)), cudaMemcpyDeviceToHost) ); return 0; } int backupData() { cutilSafeCall( cudaMemcpy(dPosX0, dPosX, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice) ); cutilSafeCall( cudaMemcpy(dPosY0, dPosY, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice) ); cutilSafeCall( cudaMemcpy(dVelX0, dVelX, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice) ); cutilSafeCall( cudaMemcpy(dVelY0, dVelY, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice) ); cutilSafeCall( cudaMemcpy(dDensity0, dDensity, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice) ); cutilSafeCall( cudaMemcpy(dEnergy0, dEnergy, (MAXP * sizeof(float)), cudaMemcpyDeviceToDevice) ); return 0; } int initRun() { /** * \brief Input run data * * Reads the input file for run data * * \date Oct 21, 2010 * \author Luca Massidda */ FILE *stream; char tok[10]; int i, m, p, pn; int iv; float fv; int mpn, mpp[10]; // Open stream file stream = fopen("armando.run", "r"); while (!feof(stream)) { sprintf(tok, " "); fscanf(stream, "%s", tok); if (strcmp(tok, "MAT") == 0) { fscanf(stream, "%i", &iv); if ((iv > 0) && (iv <= 50)) m = iv; for (p = 0; p < 10; p++) hMatProp[m][p] = 0.0; if ((m > 0) && (m <= 10)) pn = 3; if ((m > 10) && (m <= 20)) pn = 9; if ((m > 20) && (m <= 30)) pn = 10; if ((m > 30) && (m <= 40)) pn = 5; if ((m > 40) && (m <= 50)) pn = 3; for (p = 0; p < pn; p++) { fscanf(stream, "%f", &fv); hMatProp[m][p] = fv; } printf("Material %d\n", m); printf("hMatProp: \n"); for (p = 0; p < pn; p++) printf(" %f\n", hMatProp[m][p]); printf("\n"); } if (strcmp(tok, "TIME") == 0) { fscanf(stream, "%f", &fv); if (fv > 0.0) hRun.dt = fv; fscanf(stream, "%i", &iv); if (iv > 0) hRun.tsn = iv; fscanf(stream, "%i", &iv); if (iv > 0) hRun.ssi = iv; printf("Time step: %f\n", hRun.dt); printf("Steps: %i\n", hRun.tsn); printf("Save step: %i\n", hRun.ssi); printf("\n"); } if (strcmp(tok, "LIMITS") == 0) { fscanf(stream, "%f", &fv); hRun.minX = fv; fscanf(stream, "%f", &fv); hRun.maxX = fv; fscanf(stream, "%f", &fv); hRun.minY = fv; fscanf(stream, "%f", &fv); hRun.maxY = fv; printf("Domain limits: \n"); printf("X: %+e - %+e \n", hRun.minX, hRun.maxX); printf("Y: %+e - %+e \n", hRun.minY, hRun.maxY); printf("\n"); } if (strcmp(tok, "MONITORS") == 0) { fscanf(stream, "%i", &iv); mpn = iv; for (i = 0; i < mpn; i++) { fscanf(stream, "%i", &iv); mpp[i] = iv; } printf("Monitored particles: %i \n", mpn); if (mpn > 0) { printf("Index:"); for (i = 0; i < mpn; i++) printf(" %i", mpp[i]); printf("\n"); printf("\n"); } } } fclose(stream); hSound = hSmooth / hRun.dt; return 0; } int scanData() { /** * \brief Input particle data file * * Reads particle data from a disk file * * \date Oct 20, 2010 * \author Luca Massidda */ FILE *stream; int i; float fv1, fv2, fv3; int iv; // Stream file position stream = fopen("in_pos.txt", "r"); for (i = 0; !feof(stream); i++) { fscanf(stream, "%e %e ", &fv1, &fv2); hPosX[i] = 
fv1; hPosY[i] = fv2; } fclose(stream); hPN = i; // Stream file velocity stream = fopen("in_vel.txt", "r"); for (i = 0; i < hPN; i++) { fscanf(stream, "%e %e", &fv1, &fv2); hVelX[i] = fv1; hVelY[i] = fv2; } fclose(stream); // Stream file info stream = fopen("in_info.txt", "r"); for (i = 0; i < hPN; i++) { fscanf(stream, "%i %e %e ", &iv, &fv1, &fv2); hMaterial[i] = iv; hMass = fv1; hSmooth = fv2; } fclose(stream); // Stream file field stream = fopen("in_field.txt", "r"); for (i = 0; i < hPN; i++) { fscanf(stream, "%e %e %e ", &fv1, &fv2, &fv3); hDensity[i] = fv1; hPressure[i] = fv2; hEnergy[i] = fv3; } fclose(stream); return 0; } int printData() { /** * \brief Particle data file output * * Saves particle data on a disk file * * \date Oct 21, 2010 * \author Luca Massidda */ FILE *stream; int i; // Stream file position stream = fopen("out_pos.txt", "w"); for (i = 0; i < hPN; i++) fprintf(stream, "%+14.8e %+14.8e\n", hPosX[i], hPosY[i]); fclose(stream); // Stream file velocity stream = fopen("out_vel.txt", "w"); for (i = 0; i < hPN; i++) fprintf(stream, "%+14.8e %+14.8e \n", hVelX[i], hVelY[i]); fclose(stream); // Stream file info stream = fopen("out_info.txt", "w"); for (i = 0; i < hPN; i++) fprintf(stream, "%i %+14.8e %+14.8e \n", hMaterial[i], hMass, hSmooth); fclose(stream); // Stream file field stream = fopen("out_field.txt", "w"); for (i = 0; i < hPN; i++) fprintf(stream, "%+14.8e %+14.8e %+14.8e \n", hDensity[i], hPressure[i], hEnergy[i]); fclose(stream); // Stream file add1 stream = fopen("out_debug.txt", "w"); for (i = 0; i < hPN; i++) fprintf(stream, "%+14.8e %+14.8e %+14.8e %+14.8e \n", hDensityDot[i], hVelDotX[i], hVelDotY[i], hEnergyDot[i]); fclose(stream); return 0; } int outputCase() { /** * \brief Output Case file * * Saves ensight case file * * \date Jul 5, 2010 * \author Luca Massidda */ FILE *stream; int ts; // Open stream file stream = fopen("armando.case", "w"); fprintf(stream, "# Ensight formatted case file for Armando\n"); fprintf(stream, "\n"); fprintf(stream, "FORMAT\n"); fprintf(stream, "type: ensight gold\n"); fprintf(stream, "\n"); fprintf(stream, "GEOMETRY\n"); fprintf(stream, "model: 1 armando_pos_*****.geo\n"); fprintf(stream, "\n"); fprintf(stream, "VARIABLE\n"); fprintf(stream, "vector per node: 1 velocity armando_vel_*****.dat\n"); fprintf(stream, "scalar per node: 1 density armando_rho_*****.dat\n"); fprintf(stream, "scalar per node: 1 pressure armando_pre_*****.dat\n"); fprintf(stream, "scalar per node: 1 energy armando_ene_*****.dat\n"); fprintf(stream, "\n"); fprintf(stream, "TIME\n"); fprintf(stream, "time set: %i\n", 1); fprintf(stream, "number of steps: %i\n", (hRun.tsn / hRun.ssi + 1)); fprintf(stream, "filename start number: %i\n", 0); fprintf(stream, "filename increment: %i\n", 1); fprintf(stream, "time values:\n"); for (ts = 0; ts <= hRun.tsn; ts++) if ((ts % hRun.ssi) == 0) fprintf(stream, "%14.8e\n", (ts * hRun.dt)); // Close stream file fclose(stream); return 0; } int outputData(int ss) { /** * \brief Output Data file * * Saves ensight data file * * \date Oct 21, 2010 * \author Luca Massidda */ FILE *stream; char filename[80]; int i; // Stream position file sprintf(filename, "armando_pos_%05d.geo", ss); stream = fopen(filename, "w"); fprintf(stream, "Armando output in EnSight Gold format\n"); fprintf(stream, "EnSight 8.0.7\n"); fprintf(stream, "node id assign\n"); fprintf(stream, "element id assign\n"); fprintf(stream, "extents\n"); fprintf(stream, " 1.00000e+38-1.00000e+38\n"); fprintf(stream, " 1.00000e+38-1.00000e+38\n"); fprintf(stream, " 
1.00000e+38-1.00000e+38\n"); fprintf(stream, "part\n"); fprintf(stream, "%10i\n", 1); fprintf(stream, "SPH particles\n"); fprintf(stream, "coordinates\n"); fprintf(stream, "%10i\n", hPN); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", hPosX[i]); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", hPosY[i]); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", 0.0); fclose(stream); // Stream velocity file sprintf(filename, "armando_vel_%05d.dat", ss); stream = fopen(filename, "w"); fprintf(stream, "particle velocity in EnSight Gold format\n"); fprintf(stream, "part\n"); fprintf(stream, "%10i\n", 1); fprintf(stream, "coordinates\n"); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", hVelX[i]); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", hVelY[i]); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", 0.0); fclose(stream); // Stream density file sprintf(filename, "armando_rho_%05d.dat", ss); stream = fopen(filename, "w"); fprintf(stream, "particle density in EnSight Gold format\n"); fprintf(stream, "part\n"); fprintf(stream, "%10i\n", 1); fprintf(stream, "coordinates\n"); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", hDensity[i]); fclose(stream); // Stream pressure file sprintf(filename, "armando_pre_%05d.dat", ss); stream = fopen(filename, "w"); fprintf(stream, "particle pressure in EnSight Gold format\n"); fprintf(stream, "part\n"); fprintf(stream, "%10i\n", 1); fprintf(stream, "coordinates\n"); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", hPressure[i]); fclose(stream); // Stream energy file sprintf(filename, "armando_ene_%05d.dat", ss); stream = fopen(filename, "w"); fprintf(stream, "particle energy in EnSight Gold format\n"); fprintf(stream, "part\n"); fprintf(stream, "%10i\n", 1); fprintf(stream, "coordinates\n"); for (i = 0; i < hPN; i++) fprintf(stream, "%+e\n", hEnergy[i]); fclose(stream); return 0; } void initDamBreak() { int i, j, m, pi; double rho, c0, pmin; double dr; m = 1; rho = 1000.; c0 = 50.; pmin = -1.e12; hMatType[m] = 4; hMatProp[m][0] = rho; hMatProp[m][1] = c0; hMatProp[m][2] = pmin; dr = 0.02; // x4 pi = 0; for (j = 0; j <= 50; j++) { for (i = 0; i <= 100; i++) { hPosX[pi] = i * dr + 0.5 * dr; hPosY[pi] = j * dr + 0.8 * dr; hVelX[pi] = 0.0; hVelY[pi] = 0.0; hMaterial[pi] = m; hDensity[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hEnergy[pi] = 0.0; hPressure[pi] = 0.0; pi++; } } // 0 - 268 0 - 150 /* for (j = 151; j <= 153; j++) { for (i = -3; i <= 271; i++) { hPosX[pi] = i * dr; hPosY[pi] = j * dr; hVelX[pi] = 0.0; hVelY[pi] = 0.0; hMaterial[pi] = -m; hDensity[pi] = rho; // + (9.81 * rho / c0 / c0 * (50 - j) * dr); hEnergy[pi] = 0.0; hPressure[pi] = 0.0; pi++; } } */ for (j = -3; j <= -1; j++) { for (i = -3; i <= 271; i++) { hPosX[pi] = i * dr; hPosY[pi] = j * dr; hVelX[pi] = 0.0; hVelY[pi] = 0.0; hMaterial[pi] = -m; hDensity[pi] = rho; // + (9.81 * rho / c0 / c0 * (50 - j) * dr); hEnergy[pi] = 0.0; hPressure[pi] = 0.0; pi++; } } for (j = -0; j <= 80; j++) { for (i = -3; i <= -1; i++) { hPosX[pi] = i * dr; hPosY[pi] = j * dr; hVelX[pi] = 0.0; hVelY[pi] = 0.0; hMaterial[pi] = -m; hDensity[pi] = rho; // + (9.81 * rho / c0 / c0 * (50 - j) * dr); hEnergy[pi] = 0.0; hPressure[pi] = 0.0; pi++; } } for (j = -0; j <= 80; j++) { for (i = 269; i <= 271; i++) { hPosX[pi] = i * dr; hPosY[pi] = j * dr; hVelX[pi] = 0.0; hVelY[pi] = 0.0; hMaterial[pi] = -m; hDensity[pi] = rho; // + (9.81 * rho / c0 / c0 * (50 - j) * dr); hEnergy[pi] = 0.0; hPressure[pi] = 0.0; pi++; } } hPN = pi; hSmooth = 1.2 * dr; hMass = rho * dr * dr; hSound = c0; hRun.minX = -1.0; 
hRun.maxX = 6.0; hRun.minY = -1.0; hRun.maxY = 4.0; hRun.dt = 4.0e-4; //1.0e-3; hRun.tsn = 4000; //1000; hRun.ssi = 200; hGrid.oX = hRun.minX; hGrid.oY = hRun.minY; hGrid.size = 2.0 * hSmooth; hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1; hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1; printf("Dam break in a box \n"); printf("Particles: %i \n", hPN); } void initFree() { int i, j, m, pi; double rho, c0, pmin; double dr; m = 1; rho = 1000.; c0 = 50.; pmin = -1.e12; hMatType[m] = 4; hMatProp[m][0] = rho; hMatProp[m][1] = c0; hMatProp[m][2] = pmin; dr = 0.1; // x4 pi = 0; for (j = 0; j < 10; j++) { for (i = 0; i < 10; i++) { hPosX[pi] = i * dr + 0.0 * dr; hPosY[pi] = j * dr + 0.0 * dr; hVelX[pi] = 0.0; hVelY[pi] = 0.0; hMaterial[pi] = m; hDensity[pi] = rho; //+ (9.81 * rho / c0 / c0 * (50 - j) * dr); hEnergy[pi] = 0.0; pi++; } } hPN = pi; hSmooth = 1.2 * dr; hMass = rho * dr * dr; hSound = c0; hRun.minX = -0.5; hRun.maxX = 1.5; hRun.minY = -0.5; hRun.maxY = 1.5; hRun.dt = 0.5e-2; //1.0e-3; hRun.tsn = 2; //1000; hRun.ssi = 1; hGrid.oX = hRun.minX; hGrid.oY = hRun.minY; hGrid.size = 2.1 * hSmooth; hGrid.nX = (int) ((hRun.maxX - hRun.minX) / hGrid.size) +1; hGrid.nY = (int) ((hRun.maxY - hRun.minY) / hGrid.size) +1; printf("Freefall\n"); printf("Particles: %i \n", hPN); } int iSort(int *array, int *perm, int n) { int i; static int* dummy = NULL; if (!dummy) dummy = (int *) malloc(MAXP * sizeof(int)); for (i = 0; i < n; i++) dummy[i] = array[i]; for (i = 0; i < n; i++) array[i] = dummy[perm[i]]; return 0; } int fSort(float *array, int *perm, int n) { int i; static float* dummy = NULL; if (!dummy) dummy = (float *) malloc(MAXP * sizeof(float)); for (i = 0; i < n; i++) dummy[i] = array[i]; for (i = 0; i < n; i++) array[i] = dummy[perm[i]]; return 0; } int updateGrid(void) { int i, j, ix, iy; int maxnump; cutilSafeCall( cudaMemcpy(hPosX, dPosX, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost) ); cutilSafeCall( cudaMemcpy(hPosY, dPosY, (MAXP * sizeof(float)), cudaMemcpyDeviceToHost) ); // Set to zero the index vector of the grid data structure for (i = 0; i <= hGrid.nX * hGrid.nY; i++) { hGrid.set[i] = 0; hGrid.nump[i] = 0; } // The index vector is used to store the number of particles // in each grid cell for (i = 0; i < hPN; i++) { ix = (int) ((hPosX[i] - hGrid.oX) / hGrid.size); iy = (int) ((hPosY[i] - hGrid.oY) / hGrid.size); hGrid.nump[ix + iy * hGrid.nX]++; } // The index vector points at the beginning of the particle list // in the grid data structure hGrid.set[0] = 0; for (i = 1; i < hGrid.nX * hGrid.nY; i++) { hGrid.set[i] = hGrid.set[i -1] + hGrid.nump[i -1]; } // The data vector for particles is filled for (i = 0; i < hPN; i++) { ix = (int) ((hPosX[i] - hGrid.oX) / hGrid.size); iy = (int) ((hPosY[i] - hGrid.oY) / hGrid.size); j = hGrid.set[ix + iy * hGrid.nX]; hGrid.perm[j] = i; hGrid.set[ix + iy * hGrid.nX]++; } // The index vector points at the beginning of the particle list // in the grid data structure hGrid.set[0] = 0; for (i = 1; i < hGrid.nX * hGrid.nY; i++) { hGrid.set[i] = hGrid.set[i -1] + hGrid.nump[i -1]; } // The cell vector points at the grid position hGrid.cell[0] = 0; j = 0; for (i = 0; i < hGrid.nX * hGrid.nY; i++) { if (hGrid.nump[i] > 0) { hGrid.cell[j] = i; j++; } } hGrid.SN = j; maxnump = 0; for (i = 0; i < hGrid.nX * hGrid.nY; i++) if (hGrid.nump[i] > maxnump) maxnump = hGrid.nump[i]; if (maxnump > ParticlesInSet) printf("Error: Particles in cell limit exceeded. 
%d > %d\n", maxnump, ParticlesInSet); //printf("Debug: maxnump %d\n", maxnump); dGrid.oX = hGrid.oX; dGrid.oY = hGrid.oY; dGrid.nX = hGrid.nX; dGrid.nY = hGrid.nY; dGrid.size = hGrid.size; dGrid.SN = hGrid.SN; cutilSafeCall( cudaMemcpy(dGrid.set, hGrid.set, (MAXG * sizeof(int)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dGrid.nump, hGrid.nump, (MAXG * sizeof(int)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dGrid.cell, hGrid.cell, (MAXG * sizeof(int)), cudaMemcpyHostToDevice) ); cutilSafeCall( cudaMemcpy(dGrid.perm, hGrid.perm, (MAXP * sizeof(int)), cudaMemcpyHostToDevice) ); return 0; } int sortArrays(void) { int blocks, threads; threads = THREADS; blocks = (hPN + threads - 1) / threads; // Particles are re ordered kerSortInt <<< blocks, threads >>> (dMaterial, dIndex, dIntDummy); kerSortFloat <<< blocks, threads >>> (dPosX, dIndex, dFloatDummy); kerSortFloat <<< blocks, threads >>> (dPosY, dIndex, dFloatDummy); kerSortFloat <<< blocks, threads >>> (dVelX, dIndex, dFloatDummy); kerSortFloat <<< blocks, threads >>> (dVelY, dIndex, dFloatDummy); kerSortFloat <<< blocks, threads >>> (dDensity, dIndex, dFloatDummy); kerSortFloat <<< blocks, threads >>> (dEnergy, dIndex, dFloatDummy); kerSortFloat <<< blocks, threads >>> (dPressure, dIndex, dFloatDummy); /* iSort(hMaterial, hGrid.perm, hPN); fSort(hPosX, hGrid.perm, hPN); fSort(hPosY, hGrid.perm, hPN); fSort(hVelX, hGrid.perm, hPN); fSort(hVelY, hGrid.perm, hPN); fSort(hDensity, hGrid.perm, hPN); fSort(hEnergy, hGrid.perm, hPN); fSort(hPressure, hGrid.perm, hPN); fSort(hVelDotX, hGrid.perm, hPN); fSort(hVelDotY, hGrid.perm, hPN); fSort(hDensityDot, hGrid.perm, hPN); fSort(hEnergyDot, hGrid.perm, hPN); */ return 0; } int updateList(void) { int ip, ic, ix, iy, il, i, j, jp, jc; float dx, dy, dr; // Particles list is filled for (ip = 0; ip < hPN; ip++) { hList[ip] = 0; for (il = 1; il < MAXN; il++) { hList[ip + il * MAXP] = ip; } ix = (int) ((hPosX[ip] - hGrid.oX) / hGrid.size); iy = (int) ((hPosY[ip] - hGrid.oY) / hGrid.size); ic = ix + iy * hGrid.nX; /* for (jp = 0; jp < hPN; jp++) { dx = hPosX[ip] - hPosX[jp]; dy = hPosY[ip] - hPosY[jp]; dr = sqrtf(dx * dx + dy * dy); if ((dr < 2*hSmooth) && (hList[ip] < MAXN -1)) { hList[ip]++; hList[ip + hList[ip] * MAXP] = jp; } } */ for (j = -1; j <= 1; j++) { for (i = -1; i <= 1; i++) { jc = ic + i + j * hGrid.nX; for (il = 0; il < hGrid.nump[jc]; il++) { jp = hGrid.perm[hGrid.set[jc] + il]; dx = hPosX[ip] - hPosX[jp]; dy = hPosY[ip] - hPosY[jp]; dr = sqrtf(dx * dx + dy * dy); if ((dr < 2.0 * hSmooth) && (hList[ip] < MAXN -1)) { hList[ip]++; hList[ip + hList[ip] * MAXP] = jp; } } } } } /* printf("hGrid\n"); for (ic = 0; ic < hGrid.nX * hGrid.nY; ic++) printf("cell %d : %d\n", ic, hGrid.cell[ic]); for (ic = 0; ic < hGrid.nX * hGrid.nY; ic++) printf("set %d : %d\n", ic, hGrid.set[ic]); for (ic = 0; ic < hGrid.nX * hGrid.nY; ic++) printf("nump %d : %d\n", ic, hGrid.nump[ic]); for (ip = 0; ip < hPN; ip++) printf("perm %d : %d\n", ip, hGrid.perm[ip]); for (ic = 0; ic < hGrid.nX * hGrid.nY; ic++) if (hGrid.nump[ic] >0) { printf("%d ", hGrid.nump[ic]); for (il = 0; il < hGrid.nump[ic]; il++) { printf("%d ", hGrid.set[ic] + il); } printf("\n"); } printf("\n"); for (ip = 0; ip < hPN; ip++) { for (il = 0; il < MAXN; il++) { printf("%d ", hList[ip + il * MAXP]); } printf("\n"); } printf("\n"); */ cutilSafeCall( cudaMemcpy(dList, hList, (MAXP * MAXN * sizeof(int)), cudaMemcpyHostToDevice) ); return 0; } int integrateRungeKutta3(void) { /** * \brief Runge Kutta 3rd order time integration * * 
Integrate the Navier Stokes equations in time with the * Total Variation Diminishing Runge-Kutta algorithm of the 3rd order * * \date Dec 20, 2010 * \author Luca Massidda */ int ts; // Output outputCase(); copyHostToDevice(); int blocks1 = (hPN + THREADS - 1) / THREADS; int threads1 = THREADS; // TIME CYCLE for (ts = 0; ts <= hRun.tsn; ts++) { // Output data if ((ts % hRun.ssi) == 0) { printf("Saving time: %g \n", ts * hRun.dt); copyDeviceToHost(); printData(); outputData(ts / hRun.ssi); } // ******* TO DO ******* // saving the monitor outputs to vectors speeds up execution a lot // and allows a more effective parallelization!!! //output_monitor(pn, p0, dt * ts, mpn, mpp); // Calculate neighbouring particles kerHash <<< blocks1, THREADS >>> (dGrid, dPosX, dPosY, dHash, dIndex); cudppSort(hSortHandle, dHash, dIndex, 18, hPN); cutilSafeCall( cudaMemset(dSetStart, -1, MAXG * sizeof(int))); cutilSafeCall( cudaMemset(dSetStop, -1, MAXG * sizeof(int))); kerGrid <<< blocks1, THREADS >>> (dSetStart, dSetStop, dHash); sortArrays(); kerList <<< blocks1, THREADS >>> (dList, dSetStart, dSetStop, dGrid, dPosX, dPosY); backupData(); // Step 1 // External forces updateForces <<< blocks1, threads1 >>> (dMaterial, dVelDotX, dVelDotY, dDensityDot, dEnergyDot); // Calculate particle interactions kerInteraction <<< blocks1, threads1 >>> (dList, dPosX, dPosY, dVelX, dVelY, dDensity, dPressure, dDensityDot, dVelDotX, dVelDotY); balanceEnergy <<< blocks1, threads1 >>> (dPressure, dDensity, dDensityDot, dEnergyDot); // Update particles kerUpdate <<< blocks1, threads1 >>> (dMaterial, dVelDotX, dVelDotY, dDensityDot, dEnergyDot, 1.0, dPosX0, dPosY0, dVelX0, dVelY0, dDensity0, dEnergy0, dPosX, dPosY, dVelX, dVelY, dDensity, dEnergy, dPressure); // Step 2 // External forces updateForces <<< blocks1, threads1 >>> (dMaterial, dVelDotX, dVelDotY, dDensityDot, dEnergyDot); // Calculate particle interactions kerInteraction <<< blocks1, threads1 >>> (dList, dPosX, dPosY, dVelX, dVelY, dDensity, dPressure, dDensityDot, dVelDotX, dVelDotY); balanceEnergy <<< blocks1, threads1 >>> (dPressure, dDensity, dDensityDot, dEnergyDot); // Update particles kerUpdate <<< blocks1, threads1 >>> (dMaterial, dVelDotX, dVelDotY, dDensityDot, dEnergyDot, 1.0 / 4.0, dPosX0, dPosY0, dVelX0, dVelY0, dDensity0, dEnergy0, dPosX, dPosY, dVelX, dVelY, dDensity, dEnergy, dPressure); // Step 3 // External forces updateForces <<< blocks1, threads1 >>> (dMaterial, dVelDotX, dVelDotY, dDensityDot, dEnergyDot); // Calculate particle interactions kerInteraction <<< blocks1, threads1 >>> (dList, dPosX, dPosY, dVelX, dVelY, dDensity, dPressure, dDensityDot, dVelDotX, dVelDotY); balanceEnergy <<< blocks1, threads1 >>> (dPressure, dDensity, dDensityDot, dEnergyDot); // Update particles kerUpdate <<< blocks1, threads1 >>> (dMaterial, dVelDotX, dVelDotY, dDensityDot, dEnergyDot, 2.0 / 3.0, dPosX0, dPosY0, dVelX0, dVelY0, dDensity0, dEnergy0, dPosX, dPosY, dVelX, dVelY, dDensity, dEnergy, dPressure); } cutilSafeCall( cudaThreadExit() ); return 0; } int main() { /** * \brief armando2D v2.0 * * An SPH code for non stationary fluid dynamics. * This is the reviewed and improved C version of Armando v1.0 * developed at CERN in 2008 * * \date Oct 20, 2010 * \author Luca Massidda */ initHost(); initDamBreak(); //initFree(); initDevice(); initCUDPP(); integrateRungeKutta3(); return 0; }
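The three kerUpdate launches above follow the Shu and Osher total variation diminishing Runge-Kutta scheme of 3rd order: each stage blends the backed-up state (dPosX0, dVelX0, ...) with the freshly advanced state using the factors 1.0, 1/4 and 2/3. The kernel body is not part of this listing, so the sketch below assumes kerUpdate computes state = (1 - alpha) * state0 + alpha * (state + dt * dot); it shows the same update rule on a scalar ODE with purely illustrative names.

// Minimal host-side sketch of the TVD RK3 update pattern used by the three kerUpdate
// launches (assumed form: state = (1 - alpha)*state0 + alpha*(state + dt*L(state))).
#include <cstdio>

static double L(double u) { return -u; }   // stand-in for the SPH right-hand side

int main() {
    double u = 1.0;
    const double dt = 0.1;
    for (int ts = 0; ts < 10; ts++) {
        double u0 = u;                                   // backupData()
        u = u0 + dt * L(u);                              // step 1, alpha = 1
        u = 0.75 * u0 + 0.25 * (u + dt * L(u));          // step 2, alpha = 1/4
        u = u0 / 3.0 + (2.0 / 3.0) * (u + dt * L(u));    // step 3, alpha = 2/3
        printf("ts = %d, u = %f\n", ts, u);
    }
    return 0;
}

With these coefficients every stage is a convex combination of previously computed states, which is what gives the scheme its TVD property.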
cbdad74a7bd5e97c26aafd4ed0a5177518d5ae42.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <fstream> #include <malloc.h> #include <vector> using namespace std; #define BLOCK_SIZE 128 #define ArraySize 100000 #define AbsMaxVal 10 void generate_random_arr(float* A); void exec_first_condition(float* A, float* B); void exec_second_condition(float* A, float* B); void exec_third_condition(float* A, float* B); float sum_particles_host(float* d_A_even, float* d_A_odd); texture<float, 1, hipReadModeElementType> FirstArrElementsRef; texture<float, 1, hipReadModeElementType> SecondArrElementsRef; texture<float, 1, hipReadModeElementType> SumArrElementsRef; __global__ void mult_particles_first(float* C) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < ArraySize) C[i] = __fmul_rn(tex1D(FirstArrElementsRef, i), tex1D(SecondArrElementsRef, i)); } __global__ void mult_particles_second(float* C) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < ArraySize) C[i] = __fmul_rn(tex1Dfetch(FirstArrElementsRef, i), tex1D(SecondArrElementsRef, i)); } __global__ void mult_particles_third(float* C) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < ArraySize) C[i] = __fmul_rn(tex1Dfetch(FirstArrElementsRef, i), tex1Dfetch(SecondArrElementsRef, i)); } __global__ void sum_particles(float* A, int size, int iterations) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < iterations) { int start = idx * BLOCK_SIZE; int end = min((idx + 1) * BLOCK_SIZE, size); A[idx] = 0; for (int j = start; j < end; j++) A[idx] = __fadd_rn(A[idx], tex1Dfetch(SumArrElementsRef, j)); } } int main() { /*ifstream file; file.open("input.txt"); vector<float> data; int length = 0; while (!file.eof()) { float a; file >> a; data.push_back(a); length++; } file.close(); length /= 2; float* A, * B; A = &data[0]; B = &data[length];*/ float* A, * B; A = (float*)malloc(sizeof(float) * ArraySize); B = (float*)malloc(sizeof(float) * ArraySize); generate_random_arr(A); generate_random_arr(B); exec_first_condition(A, B); exec_second_condition(A, B); exec_third_condition(A, B); //data.clear(); hipUnbindTexture(FirstArrElementsRef); hipUnbindTexture(SecondArrElementsRef); hipUnbindTexture(SumArrElementsRef); return 0; } void exec_first_condition(float* A, float* B) { float* d_C_odd, *d_C_even; hipArray* d_A, * d_B; size_t size = sizeof(float) * ArraySize; int GRID_SIZE = ArraySize / BLOCK_SIZE + (ArraySize % BLOCK_SIZE != 0 ? 
1 : 0); hipMalloc((void**)&d_C_odd, size); hipMalloc((void**)&d_C_even, size); hipMallocArray(&d_A, &FirstArrElementsRef.channelDesc, ArraySize, 1); hipMallocArray(&d_B, &SecondArrElementsRef.channelDesc, ArraySize, 1); hipMemcpyToArray(d_A, 0, 0, A, size, hipMemcpyHostToDevice); hipMemcpyToArray(d_B, 0, 0, B, size, hipMemcpyHostToDevice); hipBindTextureToArray(FirstArrElementsRef, d_A); hipBindTextureToArray(SecondArrElementsRef, d_B); float KernelTime; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); mult_particles_first << <GRID_SIZE, BLOCK_SIZE >> > (d_C_odd); hipBindTexture(0, SumArrElementsRef, d_C_odd, size); float result = sum_particles_host(d_C_even, d_C_odd); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&KernelTime, start, stop); printf("First condition:\n"); printf("Result: %f\n", result); printf("Elapsed time: %f\n", KernelTime); printf("\n"); hipUnbindTexture(SumArrElementsRef); hipFree(d_C_odd); hipFree(d_C_even); hipFreeArray(d_A); hipFreeArray(d_B); hipEventDestroy(start); hipEventDestroy(stop); } void exec_second_condition(float* A, float* B) { float* d_A, *d_C_odd, *d_C_even; hipArray* d_B; size_t size = sizeof(float) * ArraySize; int GRID_SIZE = ArraySize / BLOCK_SIZE + (ArraySize % BLOCK_SIZE != 0 ? 1 : 0); hipMalloc((void**)&d_A, size); hipMalloc((void**)&d_C_odd, size); hipMalloc((void**)&d_C_even, size); hipMallocArray(&d_B, &SecondArrElementsRef.channelDesc, ArraySize, 1); hipMemcpy(d_A, A, size, hipMemcpyHostToDevice); hipBindTexture(0, FirstArrElementsRef, d_A, size); hipMemcpyToArray(d_B, 0, 0, B, size, hipMemcpyHostToDevice); hipBindTextureToArray(SecondArrElementsRef, d_B); float KernelTime; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); mult_particles_second << <GRID_SIZE, BLOCK_SIZE >> > (d_C_odd); hipBindTexture(0, SumArrElementsRef, d_C_odd, size); float result = sum_particles_host(d_C_even, d_C_odd); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&KernelTime, start, stop); printf("Second condition:\n"); printf("Result: %f\n", result); printf("Elapsed time: %f\n", KernelTime); printf("\n"); hipUnbindTexture(SumArrElementsRef); hipFree(d_C_odd); hipFree(d_C_even); hipFree(d_A); hipFreeArray(d_B); hipEventDestroy(start); hipEventDestroy(stop); } void exec_third_condition(float* A, float* B) { float* d_A, *d_B, * d_C_odd, * d_C_even; size_t size = sizeof(float) * ArraySize; int GRID_SIZE = ArraySize / BLOCK_SIZE + (ArraySize % BLOCK_SIZE != 0 ? 
1 : 0); hipMalloc((void**)&d_A, size); hipMalloc((void**)&d_B, size); hipMalloc((void**)&d_C_odd, size); hipMalloc((void**)&d_C_even, size); hipMemcpy(d_A, A, size, hipMemcpyHostToDevice); hipMemcpy(d_B, B, size, hipMemcpyHostToDevice); hipBindTexture(0, FirstArrElementsRef, d_A, size); hipBindTexture(0, SecondArrElementsRef, d_B, size); float KernelTime; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); mult_particles_third << <GRID_SIZE, BLOCK_SIZE >> > (d_C_odd); hipBindTexture(0, SumArrElementsRef, d_C_odd, size); float result = sum_particles_host(d_C_even, d_C_odd); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&KernelTime, start, stop); printf("Third condition:\n"); printf("Result: %f\n", result); printf("Elapsed time: %f\n", KernelTime); printf("\n"); hipUnbindTexture(SumArrElementsRef); hipFree(d_A); hipFree(d_B); hipFree(d_C_odd); hipFree(d_C_even); hipEventDestroy(start); hipEventDestroy(stop); } float sum_particles_host(float* d_A_even, float* d_A_odd) { bool isOdd = true; int arr_length = ArraySize; int iterations = arr_length / BLOCK_SIZE + (arr_length % BLOCK_SIZE != 0 ? 1 : 0); while (arr_length != 1) { int GRID_SIZE = iterations / BLOCK_SIZE + (iterations % BLOCK_SIZE != 0 ? 1 : 0); if (isOdd) { sum_particles << <GRID_SIZE, BLOCK_SIZE >> > (d_A_even, arr_length, iterations); hipBindTexture(0, SumArrElementsRef, d_A_even, sizeof(float) * iterations); } else { sum_particles << <GRID_SIZE, BLOCK_SIZE >> > (d_A_odd, arr_length, iterations); hipBindTexture(0, SumArrElementsRef, d_A_odd, sizeof(float) * iterations); } hipDeviceSynchronize(); arr_length = iterations; iterations = arr_length / BLOCK_SIZE + (arr_length % BLOCK_SIZE != 0 ? 1 : 0); isOdd = !isOdd; } float* result; result = (float*)malloc(sizeof(float)); hipMemcpy(result, isOdd ? d_A_odd : d_A_even, sizeof(float), hipMemcpyDeviceToHost); return *result; } void generate_random_arr(float* A) { for (int i = 0; i < ArraySize; i++) { A[i] = (rand() / (float)RAND_MAX) * (AbsMaxVal * 2 + 1) + -1 * AbsMaxVal; //printf("%f ", A[i]); } //printf("\n"); }
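sum_particles_host above reduces the product array by repeatedly summing BLOCK_SIZE-long chunks, ping-ponging between the even and odd buffers and rebinding SumArrElementsRef to whichever buffer holds the current partial sums. The sketch below performs the same iterative reduction with plain global-memory loads instead of the texture, which makes the buffer swap easier to follow; the names and test data are illustrative only.

// Iterative block-sum reduction without textures; roles of the two buffers swap
// each pass, exactly like the isOdd flag in sum_particles_host.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define BS 128

__global__ void blockSum(const float* in, float* out, int size, int chunks) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < chunks) {
        int start = idx * BS;
        int end = min((idx + 1) * BS, size);
        float s = 0.0f;
        for (int j = start; j < end; j++) s += in[j];
        out[idx] = s;
    }
}

int main() {
    const int n = 100000;
    float* h = (float*)malloc(n * sizeof(float));
    for (int i = 0; i < n; i++) h[i] = 1.0f;              // sum should be n
    float *dA, *dB;
    cudaMalloc(&dA, n * sizeof(float));
    cudaMalloc(&dB, n * sizeof(float));
    cudaMemcpy(dA, h, n * sizeof(float), cudaMemcpyHostToDevice);
    float *in = dA, *out = dB;
    int len = n;
    while (len > 1) {
        int chunks = (len + BS - 1) / BS;
        int grid = (chunks + BS - 1) / BS;
        blockSum<<<grid, BS>>>(in, out, len, chunks);
        len = chunks;
        float* t = in; in = out; out = t;                 // ping-pong the buffers
    }
    float result;
    cudaMemcpy(&result, in, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f (expected %d)\n", result, n);
    cudaFree(dA); cudaFree(dB); free(h);
    return 0;
}

Each pass shrinks the array by a factor of BLOCK_SIZE, so the loop finishes after only a few kernel launches even for large inputs.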
cbdad74a7bd5e97c26aafd4ed0a5177518d5ae42.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <fstream> #include <malloc.h> #include <vector> using namespace std; #define BLOCK_SIZE 128 #define ArraySize 100000 #define AbsMaxVal 10 void generate_random_arr(float* A); void exec_first_condition(float* A, float* B); void exec_second_condition(float* A, float* B); void exec_third_condition(float* A, float* B); float sum_particles_host(float* d_A_even, float* d_A_odd); texture<float, 1, cudaReadModeElementType> FirstArrElementsRef; texture<float, 1, cudaReadModeElementType> SecondArrElementsRef; texture<float, 1, cudaReadModeElementType> SumArrElementsRef; __global__ void mult_particles_first(float* C) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < ArraySize) C[i] = __fmul_rn(tex1D(FirstArrElementsRef, i), tex1D(SecondArrElementsRef, i)); } __global__ void mult_particles_second(float* C) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < ArraySize) C[i] = __fmul_rn(tex1Dfetch(FirstArrElementsRef, i), tex1D(SecondArrElementsRef, i)); } __global__ void mult_particles_third(float* C) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < ArraySize) C[i] = __fmul_rn(tex1Dfetch(FirstArrElementsRef, i), tex1Dfetch(SecondArrElementsRef, i)); } __global__ void sum_particles(float* A, int size, int iterations) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < iterations) { int start = idx * BLOCK_SIZE; int end = min((idx + 1) * BLOCK_SIZE, size); A[idx] = 0; for (int j = start; j < end; j++) A[idx] = __fadd_rn(A[idx], tex1Dfetch(SumArrElementsRef, j)); } } int main() { /*ifstream file; file.open("input.txt"); vector<float> data; int length = 0; while (!file.eof()) { float a; file >> a; data.push_back(a); length++; } file.close(); length /= 2; float* A, * B; A = &data[0]; B = &data[length];*/ float* A, * B; A = (float*)malloc(sizeof(float) * ArraySize); B = (float*)malloc(sizeof(float) * ArraySize); generate_random_arr(A); generate_random_arr(B); exec_first_condition(A, B); exec_second_condition(A, B); exec_third_condition(A, B); //data.clear(); cudaUnbindTexture(FirstArrElementsRef); cudaUnbindTexture(SecondArrElementsRef); cudaUnbindTexture(SumArrElementsRef); return 0; } void exec_first_condition(float* A, float* B) { float* d_C_odd, *d_C_even; cudaArray* d_A, * d_B; size_t size = sizeof(float) * ArraySize; int GRID_SIZE = ArraySize / BLOCK_SIZE + (ArraySize % BLOCK_SIZE != 0 ? 
1 : 0); cudaMalloc((void**)&d_C_odd, size); cudaMalloc((void**)&d_C_even, size); cudaMallocArray(&d_A, &FirstArrElementsRef.channelDesc, ArraySize, 1); cudaMallocArray(&d_B, &SecondArrElementsRef.channelDesc, ArraySize, 1); cudaMemcpyToArray(d_A, 0, 0, A, size, cudaMemcpyHostToDevice); cudaMemcpyToArray(d_B, 0, 0, B, size, cudaMemcpyHostToDevice); cudaBindTextureToArray(FirstArrElementsRef, d_A); cudaBindTextureToArray(SecondArrElementsRef, d_B); float KernelTime; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); mult_particles_first << <GRID_SIZE, BLOCK_SIZE >> > (d_C_odd); cudaBindTexture(0, SumArrElementsRef, d_C_odd, size); float result = sum_particles_host(d_C_even, d_C_odd); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&KernelTime, start, stop); printf("First condition:\n"); printf("Result: %f\n", result); printf("Elapsed time: %f\n", KernelTime); printf("\n"); cudaUnbindTexture(SumArrElementsRef); cudaFree(d_C_odd); cudaFree(d_C_even); cudaFreeArray(d_A); cudaFreeArray(d_B); cudaEventDestroy(start); cudaEventDestroy(stop); } void exec_second_condition(float* A, float* B) { float* d_A, *d_C_odd, *d_C_even; cudaArray* d_B; size_t size = sizeof(float) * ArraySize; int GRID_SIZE = ArraySize / BLOCK_SIZE + (ArraySize % BLOCK_SIZE != 0 ? 1 : 0); cudaMalloc((void**)&d_A, size); cudaMalloc((void**)&d_C_odd, size); cudaMalloc((void**)&d_C_even, size); cudaMallocArray(&d_B, &SecondArrElementsRef.channelDesc, ArraySize, 1); cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice); cudaBindTexture(0, FirstArrElementsRef, d_A, size); cudaMemcpyToArray(d_B, 0, 0, B, size, cudaMemcpyHostToDevice); cudaBindTextureToArray(SecondArrElementsRef, d_B); float KernelTime; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); mult_particles_second << <GRID_SIZE, BLOCK_SIZE >> > (d_C_odd); cudaBindTexture(0, SumArrElementsRef, d_C_odd, size); float result = sum_particles_host(d_C_even, d_C_odd); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&KernelTime, start, stop); printf("Second condition:\n"); printf("Result: %f\n", result); printf("Elapsed time: %f\n", KernelTime); printf("\n"); cudaUnbindTexture(SumArrElementsRef); cudaFree(d_C_odd); cudaFree(d_C_even); cudaFree(d_A); cudaFreeArray(d_B); cudaEventDestroy(start); cudaEventDestroy(stop); } void exec_third_condition(float* A, float* B) { float* d_A, *d_B, * d_C_odd, * d_C_even; size_t size = sizeof(float) * ArraySize; int GRID_SIZE = ArraySize / BLOCK_SIZE + (ArraySize % BLOCK_SIZE != 0 ? 
1 : 0); cudaMalloc((void**)&d_A, size); cudaMalloc((void**)&d_B, size); cudaMalloc((void**)&d_C_odd, size); cudaMalloc((void**)&d_C_even, size); cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice); cudaBindTexture(0, FirstArrElementsRef, d_A, size); cudaBindTexture(0, SecondArrElementsRef, d_B, size); float KernelTime; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); mult_particles_third << <GRID_SIZE, BLOCK_SIZE >> > (d_C_odd); cudaBindTexture(0, SumArrElementsRef, d_C_odd, size); float result = sum_particles_host(d_C_even, d_C_odd); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&KernelTime, start, stop); printf("Third condition:\n"); printf("Result: %f\n", result); printf("Elapsed time: %f\n", KernelTime); printf("\n"); cudaUnbindTexture(SumArrElementsRef); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C_odd); cudaFree(d_C_even); cudaEventDestroy(start); cudaEventDestroy(stop); } float sum_particles_host(float* d_A_even, float* d_A_odd) { bool isOdd = true; int arr_length = ArraySize; int iterations = arr_length / BLOCK_SIZE + (arr_length % BLOCK_SIZE != 0 ? 1 : 0); while (arr_length != 1) { int GRID_SIZE = iterations / BLOCK_SIZE + (iterations % BLOCK_SIZE != 0 ? 1 : 0); if (isOdd) { sum_particles << <GRID_SIZE, BLOCK_SIZE >> > (d_A_even, arr_length, iterations); cudaBindTexture(0, SumArrElementsRef, d_A_even, sizeof(float) * iterations); } else { sum_particles << <GRID_SIZE, BLOCK_SIZE >> > (d_A_odd, arr_length, iterations); cudaBindTexture(0, SumArrElementsRef, d_A_odd, sizeof(float) * iterations); } cudaDeviceSynchronize(); arr_length = iterations; iterations = arr_length / BLOCK_SIZE + (arr_length % BLOCK_SIZE != 0 ? 1 : 0); isOdd = !isOdd; } float* result; result = (float*)malloc(sizeof(float)); cudaMemcpy(result, isOdd ? d_A_odd : d_A_even, sizeof(float), cudaMemcpyDeviceToHost); return *result; } void generate_random_arr(float* A) { for (int i = 0; i < ArraySize; i++) { A[i] = (rand() / (float)RAND_MAX) * (AbsMaxVal * 2 + 1) + -1 * AbsMaxVal; //printf("%f ", A[i]); } //printf("\n"); }
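Both versions of this file rely on texture references (the file-scope texture<float, 1, ...> objects bound with cudaBindTexture / cudaBindTextureToArray), an API that was deprecated and then removed from newer CUDA toolkits, so the listing no longer builds there. For the linear-memory bindings, a texture object gives the same cached fetch path; the sketch below is a minimal stand-alone example of that style and is not part of the original program.

// Linear-memory texture fetch via the texture-object API (illustrative names,
// error checking omitted).
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

__global__ void scale(cudaTextureObject_t tex, float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = 2.0f * tex1Dfetch<float>(tex, i);
}

int main() {
    const int n = 1024;
    float* h = (float*)malloc(n * sizeof(float));
    for (int i = 0; i < n; i++) h[i] = (float)i;
    float *dIn, *dOut;
    cudaMalloc(&dIn, n * sizeof(float));
    cudaMalloc(&dOut, n * sizeof(float));
    cudaMemcpy(dIn, h, n * sizeof(float), cudaMemcpyHostToDevice);

    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypeLinear;
    resDesc.res.linear.devPtr = dIn;
    resDesc.res.linear.desc = cudaCreateChannelDesc<float>();
    resDesc.res.linear.sizeInBytes = n * sizeof(float);
    cudaTextureDesc texDesc = {};
    texDesc.readMode = cudaReadModeElementType;
    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);

    scale<<<(n + 127) / 128, 128>>>(tex, dOut, n);
    cudaMemcpy(h, dOut, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("h[10] = %f\n", h[10]);   // expect 20.0

    cudaDestroyTextureObject(tex);
    cudaFree(dIn); cudaFree(dOut); free(h);
    return 0;
}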
3eae77405146410e1c32915cdd7fa4ea4046be09.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/scan.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include <thrust/functional.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> int main() { thrust::host_vector<int> data_h; thrust::device_vector<int> data_d; thrust::device_vector<int> out_d; for (int i = 0; i < 10; i++) { int x = i + ((i % 3) == 0) * 2; data_h.push_back(x); printf("%3d ", x); } putchar('\n'); data_d = data_h; out_d.resize(data_d.size()); thrust::exclusive_scan(data_d.begin(), data_d.end(), out_d.begin(), -1, thrust::maximum<int>()); data_h = out_d; for (thrust::host_vector<int>::iterator i = data_h.begin(); i != data_h.end(); i++) printf("%3d ", *i); putchar('\n'); }
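For the generated input 2 1 2 5 4 5 8 7 8 11, thrust::exclusive_scan with an initial value of -1 and thrust::maximum<int>() computes a running maximum shifted right by one position, so the second printed line should read -1 2 2 2 5 5 5 8 8 8. The host-only loop below reproduces that result without Thrust; it is just a cross-check with illustrative code, not part of the sample.

// Host-side check of the exclusive max-scan: out[0] = init, out[i] = max(out[i-1], in[i-1]).
#include <cstdio>
#include <algorithm>
#include <vector>

int main() {
    std::vector<int> in, out;
    for (int i = 0; i < 10; i++) in.push_back(i + ((i % 3) == 0) * 2);
    out.resize(in.size());
    int acc = -1;                       // same initial value as the Thrust call
    for (size_t i = 0; i < in.size(); i++) {
        out[i] = acc;                   // exclusive: write before combining
        acc = std::max(acc, in[i]);
    }
    for (int v : out) printf("%3d ", v);
    putchar('\n');
    return 0;
}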
3eae77405146410e1c32915cdd7fa4ea4046be09.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/scan.h> #include <thrust/reduce.h> #include <thrust/sort.h> #include <thrust/functional.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> int main() { thrust::host_vector<int> data_h; thrust::device_vector<int> data_d; thrust::device_vector<int> out_d; for (int i = 0; i < 10; i++) { int x = i + ((i % 3) == 0) * 2; data_h.push_back(x); printf("%3d ", x); } putchar('\n'); data_d = data_h; out_d.resize(data_d.size()); thrust::exclusive_scan(data_d.begin(), data_d.end(), out_d.begin(), -1, thrust::maximum<int>()); data_h = out_d; for (thrust::host_vector<int>::iterator i = data_h.begin(); i != data_h.end(); i++) printf("%3d ", *i); putchar('\n'); }
0722b0f28be43e5611e351938f0c19069e4933a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> //#include <sndfile.h> #include <iostream> #include <fstream> #include <vector> #include <string> #include "Config\Config.h" #include "Microphone\MicrophoneArray.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <algorithm> #include <cstdlib> #ifdef WIN32 #include <windows.h> float get_time() { LARGE_INTEGER t, f; QueryPerformanceCounter(&t); QueryPerformanceFrequency(&f); return (float)t.QuadPart / (float)f.QuadPart; } #else #include <sys/time.h> #include <sys/resource.h> float get_time() { struct timeval t; struct timezone tzp; gettimeofday(&t, &tzp); return t.tv_sec + t.tv_usec*1e-6; } #endif struct MicParamsGpu { float *outputData; float *rawData; int packetSize; float *leapData; int leapStride; int *delays; int arraySize; int stride; }; hipError_t beamformWithCudaHelper(MicrophoneArray& array, SharpVector& outputData); __device__ float GetElement(MicParamsGpu params, int curMic, int index ) { if (params.packetSize > index) { return params.rawData[curMic*params.packetSize + index]; } else { int leapIndex = (index - params.packetSize) + curMic*params.leapStride; return params.leapData[leapIndex]; } } __global__ void beamformKernel2(MicParamsGpu params) { int xIndex = threadIdx.x; int currentStartIndex = xIndex * params.stride; for (int k = 0; k < params.stride; k++) { float curVal = 0; for (int i = 0; i < params.arraySize; i++) { curVal = GetElement(params, i, currentStartIndex + k + params.delays[i]); } params.outputData[currentStartIndex + k] = curVal; } } int createPulse(SharpVector& data, size_t readSize, float sampleRate) { float f0 = 1500; float ts = 1.0 / sampleRate; //float vz = 3000; for (size_t i = 0; i < readSize; i++) { float realTime = i * ts; float realPart = cos(2.0*GLOBAL_PI*realTime*f0); data.push_back(realPart); } return data.size(); } int main() { Config& ins = Config::getInstance(); ins.samplePerSecond = 44000; ins.arraySize = 32; ins.distBetweenMics = 10; ins.packetSize = 44000; MicrophoneArray array; SharpVector rawData; createPulse(rawData, 44000, 44000); array.InsertSound(rawData, 1000, 45); float startTime = get_time(); SharpVector outputData(Config::getInstance().packetSize); array.Beamform(outputData, 1000, 45); // CPU toplama //beamformWithCudaHelper(array, outputData); // GPU toplama float endTime = get_time(); std::cout << "CPU Time spent: " << endTime - startTime; } // Helper function for using CUDA to add vectors in parallel. 
hipError_t beamformWithCudaHelper(MicrophoneArray& array, SharpVector& outputData) { hipError_t cudaStatus; MicParamsGpu params; params.arraySize = array.micropshoneList.size(); params.packetSize = Config::getInstance().packetSize; params.leapStride = Config::getInstance().getMicMaxDelay()*2; hipMalloc(&params.rawData, array.micropshoneList.size() * sizeof(float) * params.packetSize); hipMalloc(&params.leapData, array.micropshoneList.size() * sizeof(float) * params.leapStride); hipMalloc(&params.delays, array.micropshoneList.size() * sizeof(int)); hipMalloc(&params.outputData, Config::getInstance().packetSize * sizeof(float) ); std::vector<int> delayVec; params.stride = params.packetSize / 1000; cudaStatus = hipGetLastError(); for (int i = 0; i < params.arraySize; i++) { hipMemcpy( params.rawData + i * params.packetSize, array.micropshoneList[i].getData().data(), params.packetSize* sizeof(float), hipMemcpyHostToDevice); cudaStatus = hipGetLastError(); hipMemcpy(params.leapData + i * params.leapStride, array.micropshoneList[i].getLeapData().data(), params.leapStride* sizeof(float), hipMemcpyHostToDevice); cudaStatus = hipGetLastError(); delayVec.push_back(array.micropshoneList[i].getDelay(1000, 45) + Config::getInstance().getMicMaxDelay()); } cudaStatus = hipGetLastError(); hipMemcpy(params.delays, delayVec.data(), delayVec.size() * sizeof(int), hipMemcpyHostToDevice); float startTime = get_time(); // Launch a kernel on the GPU with one thread for each element. beamformKernel2 << <1, 1000 >> >(params); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. hipMemcpy(outputData.data(), params.outputData, Config::getInstance().packetSize * sizeof(float), hipMemcpyDeviceToHost); float endTime = get_time(); std::cout << "CPU Time spent: " << endTime - startTime; return cudaStatus; }
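Note that beamformKernel2 assigns curVal = GetElement(...) inside the microphone loop, so only the last channel ever reaches outputData; a delay-and-sum beamformer normally accumulates the delayed channels. The host-side reference below does that accumulation over buffers laid out like MicParamsGpu (a per-microphone packet followed by leap samples). The names and toy data are illustrative, and the Microphone/Config classes of the real project are not reproduced here.

// Host-side delay-and-sum reference over a MicParamsGpu-like layout.
#include <cstdio>
#include <vector>

float sample(const std::vector<float>& raw, const std::vector<float>& leap,
             int mic, int idx, int packetSize, int leapStride) {
    if (idx < packetSize) return raw[mic * packetSize + idx];
    return leap[mic * leapStride + (idx - packetSize)];
}

void delayAndSum(const std::vector<float>& raw, const std::vector<float>& leap,
                 const std::vector<int>& delays, std::vector<float>& out,
                 int mics, int packetSize, int leapStride) {
    for (int i = 0; i < packetSize; i++) {
        float acc = 0.0f;
        for (int m = 0; m < mics; m++)
            acc += sample(raw, leap, m, i + delays[m], packetSize, leapStride);
        out[i] = acc;                       // sum across channels, not overwrite
    }
}

int main() {
    int mics = 2, packetSize = 8, leapStride = 4;
    std::vector<float> raw(mics * packetSize, 1.0f), leap(mics * leapStride, 0.5f);
    std::vector<int> delays = {0, 2};
    std::vector<float> out(packetSize, 0.0f);
    delayAndSum(raw, leap, delays, out, mics, packetSize, leapStride);
    for (float v : out) printf("%g ", v);   // last two entries dip into the leap data
    putchar('\n');
    return 0;
}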
0722b0f28be43e5611e351938f0c19069e4933a9.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> //#include <sndfile.h> #include <iostream> #include <fstream> #include <vector> #include <string> #include "Config\Config.h" #include "Microphone\MicrophoneArray.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <algorithm> #include <cstdlib> #ifdef WIN32 #include <windows.h> float get_time() { LARGE_INTEGER t, f; QueryPerformanceCounter(&t); QueryPerformanceFrequency(&f); return (float)t.QuadPart / (float)f.QuadPart; } #else #include <sys/time.h> #include <sys/resource.h> float get_time() { struct timeval t; struct timezone tzp; gettimeofday(&t, &tzp); return t.tv_sec + t.tv_usec*1e-6; } #endif struct MicParamsGpu { float *outputData; float *rawData; int packetSize; float *leapData; int leapStride; int *delays; int arraySize; int stride; }; cudaError_t beamformWithCudaHelper(MicrophoneArray& array, SharpVector& outputData); __device__ float GetElement(MicParamsGpu params, int curMic, int index ) { if (params.packetSize > index) { return params.rawData[curMic*params.packetSize + index]; } else { int leapIndex = (index - params.packetSize) + curMic*params.leapStride; return params.leapData[leapIndex]; } } __global__ void beamformKernel2(MicParamsGpu params) { int xIndex = threadIdx.x; int currentStartIndex = xIndex * params.stride; for (int k = 0; k < params.stride; k++) { float curVal = 0; for (int i = 0; i < params.arraySize; i++) { curVal = GetElement(params, i, currentStartIndex + k + params.delays[i]); } params.outputData[currentStartIndex + k] = curVal; } } int createPulse(SharpVector& data, size_t readSize, float sampleRate) { float f0 = 1500; float ts = 1.0 / sampleRate; //float vz = 3000; for (size_t i = 0; i < readSize; i++) { float realTime = i * ts; float realPart = cos(2.0*GLOBAL_PI*realTime*f0); data.push_back(realPart); } return data.size(); } int main() { Config& ins = Config::getInstance(); ins.samplePerSecond = 44000; ins.arraySize = 32; ins.distBetweenMics = 10; ins.packetSize = 44000; MicrophoneArray array; SharpVector rawData; createPulse(rawData, 44000, 44000); array.InsertSound(rawData, 1000, 45); float startTime = get_time(); SharpVector outputData(Config::getInstance().packetSize); array.Beamform(outputData, 1000, 45); // CPU toplama //beamformWithCudaHelper(array, outputData); // GPU toplama float endTime = get_time(); std::cout << "CPU Time spent: " << endTime - startTime; } // Helper function for using CUDA to add vectors in parallel. 
cudaError_t beamformWithCudaHelper(MicrophoneArray& array, SharpVector& outputData) { cudaError_t cudaStatus; MicParamsGpu params; params.arraySize = array.micropshoneList.size(); params.packetSize = Config::getInstance().packetSize; params.leapStride = Config::getInstance().getMicMaxDelay()*2; cudaMalloc(&params.rawData, array.micropshoneList.size() * sizeof(float) * params.packetSize); cudaMalloc(&params.leapData, array.micropshoneList.size() * sizeof(float) * params.leapStride); cudaMalloc(&params.delays, array.micropshoneList.size() * sizeof(int)); cudaMalloc(&params.outputData, Config::getInstance().packetSize * sizeof(float) ); std::vector<int> delayVec; params.stride = params.packetSize / 1000; cudaStatus = cudaGetLastError(); for (int i = 0; i < params.arraySize; i++) { cudaMemcpy( params.rawData + i * params.packetSize, array.micropshoneList[i].getData().data(), params.packetSize* sizeof(float), cudaMemcpyHostToDevice); cudaStatus = cudaGetLastError(); cudaMemcpy(params.leapData + i * params.leapStride, array.micropshoneList[i].getLeapData().data(), params.leapStride* sizeof(float), cudaMemcpyHostToDevice); cudaStatus = cudaGetLastError(); delayVec.push_back(array.micropshoneList[i].getDelay(1000, 45) + Config::getInstance().getMicMaxDelay()); } cudaStatus = cudaGetLastError(); cudaMemcpy(params.delays, delayVec.data(), delayVec.size() * sizeof(int), cudaMemcpyHostToDevice); float startTime = get_time(); // Launch a kernel on the GPU with one thread for each element. beamformKernel2 << <1, 1000 >> >(params); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaMemcpy(outputData.data(), params.outputData, Config::getInstance().packetSize * sizeof(float), cudaMemcpyDeviceToHost); float endTime = get_time(); std::cout << "CPU Time spent: " << endTime - startTime; return cudaStatus; }
f2e2f5dee4a93dd54197e1c1c0c0fe8ef829404e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <unittest/unittest.h> #include <thrust/equal.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> template<typename Iterator1, typename Iterator2, typename Iterator3> __global__ void equal_kernel(Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator3 result) { *result = thrust::equal(thrust::seq, first1, last1, first2); } template<typename Iterator1, typename Iterator2, typename BinaryPredicate, typename Iterator3> __global__ void equal_kernel(Iterator1 first1, Iterator1 last1, Iterator2 first2, BinaryPredicate pred, Iterator3 result) { *result = thrust::equal(thrust::seq, first1, last1, first2, pred); } template<typename T> void TestEqualDeviceSeq(const size_t n) { thrust::device_vector<T> d_data1 = unittest::random_samples<T>(n); thrust::device_vector<T> d_data2 = unittest::random_samples<T>(n); thrust::device_vector<bool> d_result(1, false); //empty ranges hipLaunchKernelGGL(( equal_kernel), dim3(1),dim3(1), 0, 0, d_data1.begin(), d_data1.begin(), d_data1.begin(), d_result.begin()); ASSERT_EQUAL(d_result[0], true); //symmetric cases hipLaunchKernelGGL(( equal_kernel), dim3(1),dim3(1), 0, 0, d_data1.begin(), d_data1.end(), d_data1.begin(), d_result.begin()); ASSERT_EQUAL(d_result[0], true); if(n > 0) { d_data1[0] = 0; d_data2[0] = 1; //different vectors hipLaunchKernelGGL(( equal_kernel), dim3(1),dim3(1), 0, 0, d_data1.begin(), d_data1.end(), d_data2.begin(), d_result.begin()); ASSERT_EQUAL(d_result[0], false); //different predicates hipLaunchKernelGGL(( equal_kernel), dim3(1),dim3(1), 0, 0, d_data1.begin(), d_data1.begin() + 1, d_data2.begin(), thrust::less<T>(), d_result.begin()); ASSERT_EQUAL(d_result[0], true); hipLaunchKernelGGL(( equal_kernel), dim3(1),dim3(1), 0, 0, d_data1.begin(), d_data1.begin() + 1, d_data2.begin(), thrust::greater<T>(), d_result.begin()); ASSERT_EQUAL(d_result[0], false); } } DECLARE_VARIABLE_UNITTEST(TestEqualDeviceSeq); void TestEqualCudaStreams() { thrust::device_vector<int> v1(5); thrust::device_vector<int> v2(5); v1[0] = 5; v1[1] = 2; v1[2] = 0; v1[3] = 0; v1[4] = 0; v2[0] = 5; v2[1] = 2; v2[2] = 0; v2[3] = 6; v2[4] = 1; hipStream_t s; hipStreamCreate(&s); ASSERT_EQUAL(thrust::equal(thrust::hip::par(s), v1.begin(), v1.end(), v1.begin()), true); ASSERT_EQUAL(thrust::equal(thrust::hip::par(s), v1.begin(), v1.end(), v2.begin()), false); ASSERT_EQUAL(thrust::equal(thrust::hip::par(s), v2.begin(), v2.end(), v2.begin()), true); ASSERT_EQUAL(thrust::equal(thrust::hip::par(s), v1.begin(), v1.begin() + 0, v1.begin()), true); ASSERT_EQUAL(thrust::equal(thrust::hip::par(s), v1.begin(), v1.begin() + 1, v1.begin()), true); ASSERT_EQUAL(thrust::equal(thrust::hip::par(s), v1.begin(), v1.begin() + 3, v2.begin()), true); ASSERT_EQUAL(thrust::equal(thrust::hip::par(s), v1.begin(), v1.begin() + 4, v2.begin()), false); ASSERT_EQUAL(thrust::equal(thrust::hip::par(s), v1.begin(), v1.end(), v2.begin(), thrust::less_equal<int>()), true); ASSERT_EQUAL(thrust::equal(thrust::hip::par(s), v1.begin(), v1.end(), v2.begin(), thrust::greater<int>()), false); hipStreamDestroy(s); } DECLARE_UNITTEST(TestEqualCudaStreams);
f2e2f5dee4a93dd54197e1c1c0c0fe8ef829404e.cu
#include <unittest/unittest.h> #include <thrust/equal.h> #include <thrust/functional.h> #include <thrust/execution_policy.h> template<typename Iterator1, typename Iterator2, typename Iterator3> __global__ void equal_kernel(Iterator1 first1, Iterator1 last1, Iterator2 first2, Iterator3 result) { *result = thrust::equal(thrust::seq, first1, last1, first2); } template<typename Iterator1, typename Iterator2, typename BinaryPredicate, typename Iterator3> __global__ void equal_kernel(Iterator1 first1, Iterator1 last1, Iterator2 first2, BinaryPredicate pred, Iterator3 result) { *result = thrust::equal(thrust::seq, first1, last1, first2, pred); } template<typename T> void TestEqualDeviceSeq(const size_t n) { thrust::device_vector<T> d_data1 = unittest::random_samples<T>(n); thrust::device_vector<T> d_data2 = unittest::random_samples<T>(n); thrust::device_vector<bool> d_result(1, false); //empty ranges equal_kernel<<<1,1>>>(d_data1.begin(), d_data1.begin(), d_data1.begin(), d_result.begin()); ASSERT_EQUAL(d_result[0], true); //symmetric cases equal_kernel<<<1,1>>>(d_data1.begin(), d_data1.end(), d_data1.begin(), d_result.begin()); ASSERT_EQUAL(d_result[0], true); if(n > 0) { d_data1[0] = 0; d_data2[0] = 1; //different vectors equal_kernel<<<1,1>>>(d_data1.begin(), d_data1.end(), d_data2.begin(), d_result.begin()); ASSERT_EQUAL(d_result[0], false); //different predicates equal_kernel<<<1,1>>>(d_data1.begin(), d_data1.begin() + 1, d_data2.begin(), thrust::less<T>(), d_result.begin()); ASSERT_EQUAL(d_result[0], true); equal_kernel<<<1,1>>>(d_data1.begin(), d_data1.begin() + 1, d_data2.begin(), thrust::greater<T>(), d_result.begin()); ASSERT_EQUAL(d_result[0], false); } } DECLARE_VARIABLE_UNITTEST(TestEqualDeviceSeq); void TestEqualCudaStreams() { thrust::device_vector<int> v1(5); thrust::device_vector<int> v2(5); v1[0] = 5; v1[1] = 2; v1[2] = 0; v1[3] = 0; v1[4] = 0; v2[0] = 5; v2[1] = 2; v2[2] = 0; v2[3] = 6; v2[4] = 1; cudaStream_t s; cudaStreamCreate(&s); ASSERT_EQUAL(thrust::equal(thrust::cuda::par(s), v1.begin(), v1.end(), v1.begin()), true); ASSERT_EQUAL(thrust::equal(thrust::cuda::par(s), v1.begin(), v1.end(), v2.begin()), false); ASSERT_EQUAL(thrust::equal(thrust::cuda::par(s), v2.begin(), v2.end(), v2.begin()), true); ASSERT_EQUAL(thrust::equal(thrust::cuda::par(s), v1.begin(), v1.begin() + 0, v1.begin()), true); ASSERT_EQUAL(thrust::equal(thrust::cuda::par(s), v1.begin(), v1.begin() + 1, v1.begin()), true); ASSERT_EQUAL(thrust::equal(thrust::cuda::par(s), v1.begin(), v1.begin() + 3, v2.begin()), true); ASSERT_EQUAL(thrust::equal(thrust::cuda::par(s), v1.begin(), v1.begin() + 4, v2.begin()), false); ASSERT_EQUAL(thrust::equal(thrust::cuda::par(s), v1.begin(), v1.end(), v2.begin(), thrust::less_equal<int>()), true); ASSERT_EQUAL(thrust::equal(thrust::cuda::par(s), v1.begin(), v1.end(), v2.begin(), thrust::greater<int>()), false); cudaStreamDestroy(s); } DECLARE_UNITTEST(TestEqualCudaStreams);
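The kernels above call thrust::equal inside device code with the thrust::seq policy; the same overloads are more often used from the host, where Thrust runs the comparison as a parallel reduction over the device vectors. The short host-side sketch below shows the plain two-range form and the binary-predicate form with illustrative values.

// Host-side thrust::equal: operator== form and predicate form.
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/equal.h>
#include <thrust/functional.h>

int main() {
    thrust::device_vector<int> a(4), b(4);
    a[0] = 1; a[1] = 2; a[2] = 3; a[3] = 4;
    b[0] = 1; b[1] = 2; b[2] = 3; b[3] = 5;

    bool same     = thrust::equal(a.begin(), a.end(), b.begin());
    bool pairwise = thrust::equal(a.begin(), a.end(), b.begin(),
                                  thrust::less_equal<int>());   // a[i] <= b[i] everywhere?

    printf("equal: %d, pairwise <=: %d\n", same, pairwise);     // expect 0, 1
    return 0;
}

thrust::equal with thrust::less_equal answers "does a[i] <= b[i] hold for every i", which is how TestEqualCudaStreams above checks pairwise ordering rather than strict equality.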
8a7119cfd435318c45b07573b80e255c87176243.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <hip/hip_runtime.h> #ifndef M_PI #define M_PI 3.14159265 #endif #include "cuda_mmult_kernels.h" // define macro OUTPUT to print input & output matrix //#define OUTPUT // define macro QUERY_DEVICES to print device information //#define QUERY_DEVICES void checkCUDAError(const char *msg); void zeroMatrix(float *A, int n); void dstMatrix(float *A, int n); void CPU_matrixMult(float *A, float *B, float *C, int n, int repeats); void CUDA_matrixMult(float *A, float *B, float *C, int n, int repeats); void CUDA_freeMatrix(float *Ad); void printMatrix(const char* name, float *A, int n); void printDeviceInfo(hipDeviceProp_t devProp); int main(int argc, const char *argv[]) { float *A,*B,*C; /* arrays for matrices */ int n, m; /* n=matrix size, m=repeats */ hipEvent_t start_timer, stop_timer; float gpu_time; #ifdef QUERY_DEVICES // Number of CUDA devices int devCount; hipGetDeviceCount(&devCount); printf("CUDA Device Query...\n"); printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties printf("\nCUDA Device #%d\n", i); hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, i); printDeviceInfo(devProp); } #endif if (argc < 3) { printf("Error: please specify matrix size and number of multiplications: \n"); printf("Usage: %s <size> <repeat> \n", argv[0]); exit(1); }; /* read matrix size and number of repeats */ n = atoi(argv[1]); m = atoi(argv[2]); if (n % TILE_SIZE != 0) { printf("Error: matrix size has to be a multiple of tile size %d \n", TILE_SIZE); exit(1); }; hipEventCreate(&start_timer); hipEventCreate(&stop_timer); printf("Matrix mult. 
of size %d (%d repeats): \n", n, m); /* allocate and initialise matrices in host memory */ int size = n*n*sizeof(float); A = (float *) malloc(size); dstMatrix(A,n); B = (float *) malloc(size); dstMatrix(B,n); C = (float *) malloc(size); zeroMatrix(C,n); #ifdef OUTPUT printMatrix("A",A,n); printMatrix("B",B,n); #endif /* allocate matrices in device memory and transfer matrices from host to device memory */ float *Ad, *Bd, *Cd; hipMalloc((void**)&Ad, size); checkCUDAError("allocate memory for A"); hipMalloc((void**)&Bd, size); checkCUDAError("allocate memory for B"); hipMalloc((void**)&Cd, size); checkCUDAError("allocate memory for C"); hipMemcpy(Ad,A, size, hipMemcpyHostToDevice); checkCUDAError("memory of A not transferred"); hipMemcpy(Bd,B, size, hipMemcpyHostToDevice); checkCUDAError("memory of B not transferred"); hipMemcpy(Cd,C, size, hipMemcpyHostToDevice); checkCUDAError("memory of C not transferred"); /* perform matrix multiplication (m repeats) */ hipEventRecord(start_timer, 0); //CPU_matrixMult(A, B, C, n, m); CUDA_matrixMult(Ad,Bd,Cd,n,m); hipEventRecord(stop_timer, 0); /* transfer result matrix back from device to host memory and deallocate device matrices */ hipMemcpy(C,Cd, size, hipMemcpyDeviceToHost); checkCUDAError("memory of C not transferred back"); hipFree(Ad); hipFree(Bd); hipFree(Cd); #ifdef OUTPUT printMatrix("C", C, n); #endif /* deallocate host matrices, print results */ free(A); free(B); free(C); hipEventSynchronize(stop_timer); hipEventElapsedTime(&gpu_time, start_timer, stop_timer); printf("Elapsed time : %.3f s \n", gpu_time / 1000.0f); printf("Performance : %.0f MFlop/s \n", float(m) * (2.0f * n - 1.0f) * n * n / (gpu_time / 1000.0f * 1024.f * 1024.f)); hipEventDestroy(start_timer); hipEventDestroy(stop_timer); return(0); } /* set Matrix values to zero */ void zeroMatrix(float *A, int n) { int i,k; for (i=0; i<n; i++) for (k=0; k<n; k++) A[i*n+k] = 0; } /* initialise Matrix: discrete Sine Transform */ void dstMatrix(float *A, int n) { int i,k; for (i=0; i<n; i++) for (k=0; k<n; k++) A[i*n+k] = sin( ((i+1)*(k+1)*M_PI)/(n+1)); } /* * matrix multiplication C += A*B * -> standard C implementation */ void CPU_matrixMult(float *A, float *B, float *C, int n, int repeats) { int i,j,k; float tmp; for(int r=0; r<repeats; r++) { for (i=0; i<n; i++) { for (j=0; j<n; j++) { tmp = A[i*n+j]; for (k=0; k<n; k++) { C[i*n+k] += tmp * B[j*n+k]; } } } } } /* * matrix multiplication C += A*B * -> CUDA implementation: kernel invocation * (implementation adopted from Kirk&Hwu: * "Programming Massively Parallel Processors, chapter 3) */ __host__ void CUDA_matrixMult(float *Ad, float *Bd, float *Cd, int n, int repeats) { dim3 dimBlock(TILE_SIZE,TILE_SIZE); dim3 dimGrid(n/TILE_SIZE,n/TILE_SIZE); for(int i=0; i<repeats; i++) { // matrixMultKernel_global<<<dimGrid,dimBlock>>>(Ad,Bd,Cd,n); // matrixMultKernel_tiled<<<dimGrid,dimBlock>>>(Ad,Bd,Cd,n); // matrixMultKernel_coalesced<<<dimGrid,dimBlock>>>(Ad,Bd,Cd,n); hipLaunchKernelGGL(( matrixMultKernel_overlap), dim3(dimGrid),dim3(dimBlock), 0, 0, Ad,Bd,Cd,n); } checkCUDAError("matrix multiplication kernel failed"); } /* print Matrix */ void printMatrix(const char* name, float *A, int n) { int i,k; printf("Matrix %s (size %d)\n",name,n); for (i=0; i<n; i++) { for (k=0; k<n; k++) { printf("%f ", A[i*n+k]); } printf("\n"); } } /* * helper function to check for errors in CUDA calls * source: NVIDIA */ void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "\nCuda error (%s): %s.\n", 
msg, hipGetErrorString( err) ); exit(-1); } } #ifdef QUERY_DEVICES // Print device info void printDeviceInfo(hipDeviceProp_t devProp) { printf("Revision number: %d.%d\n", devProp.major, devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %lu MB\n", devProp.totalGlobalMem / (1024 * 1024)); printf("Total shared memory per block: %lu kB\n", devProp.sharedMemPerBlock / 1024); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %lu MB\n", devProp.memPitch / (1024 * 1024)); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); printf("Maximum dimensions of block: %d %d %d\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]); printf("Maximum dimensions of grid: %d %d %d\n", devProp.maxGridSize[0], devProp.maxGridSize[1], devProp.maxGridSize[2]); printf("Clock rate: %d MHz\n", devProp.clockRate / 1000); printf("Total constant memory: %lu kB\n", devProp.totalConstMem / 1024); printf("Texture alignment: %lu B\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); printf("\n"); } #endif
8a7119cfd435318c45b07573b80e255c87176243.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #include <cuda.h> #ifndef M_PI #define M_PI 3.14159265 #endif #include "cuda_mmult_kernels.h" // define macro OUTPUT to print input & output matrix //#define OUTPUT // define macro QUERY_DEVICES to print device information //#define QUERY_DEVICES void checkCUDAError(const char *msg); void zeroMatrix(float *A, int n); void dstMatrix(float *A, int n); void CPU_matrixMult(float *A, float *B, float *C, int n, int repeats); void CUDA_matrixMult(float *A, float *B, float *C, int n, int repeats); void CUDA_freeMatrix(float *Ad); void printMatrix(const char* name, float *A, int n); void printDeviceInfo(cudaDeviceProp devProp); int main(int argc, const char *argv[]) { float *A,*B,*C; /* arrays for matrices */ int n, m; /* n=matrix size, m=repeats */ cudaEvent_t start_timer, stop_timer; float gpu_time; #ifdef QUERY_DEVICES // Number of CUDA devices int devCount; cudaGetDeviceCount(&devCount); printf("CUDA Device Query...\n"); printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties printf("\nCUDA Device #%d\n", i); cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, i); printDeviceInfo(devProp); } #endif if (argc < 3) { printf("Error: please specify matrix size and number of multiplications: \n"); printf("Usage: %s <size> <repeat> \n", argv[0]); exit(1); }; /* read matrix size and number of repeats */ n = atoi(argv[1]); m = atoi(argv[2]); if (n % TILE_SIZE != 0) { printf("Error: matrix size has to be a multiple of tile size %d \n", TILE_SIZE); exit(1); }; cudaEventCreate(&start_timer); cudaEventCreate(&stop_timer); printf("Matrix mult. of size %d (%d repeats): \n", n, m); /* allocate and initialise matrices in host memory */ int size = n*n*sizeof(float); A = (float *) malloc(size); dstMatrix(A,n); B = (float *) malloc(size); dstMatrix(B,n); C = (float *) malloc(size); zeroMatrix(C,n); #ifdef OUTPUT printMatrix("A",A,n); printMatrix("B",B,n); #endif /* allocate matrices in device memory and transfer matrices from host to device memory */ float *Ad, *Bd, *Cd; cudaMalloc((void**)&Ad, size); checkCUDAError("allocate memory for A"); cudaMalloc((void**)&Bd, size); checkCUDAError("allocate memory for B"); cudaMalloc((void**)&Cd, size); checkCUDAError("allocate memory for C"); cudaMemcpy(Ad,A, size, cudaMemcpyHostToDevice); checkCUDAError("memory of A not transferred"); cudaMemcpy(Bd,B, size, cudaMemcpyHostToDevice); checkCUDAError("memory of B not transferred"); cudaMemcpy(Cd,C, size, cudaMemcpyHostToDevice); checkCUDAError("memory of C not transferred"); /* perform matrix multiplication (m repeats) */ cudaEventRecord(start_timer, 0); //CPU_matrixMult(A, B, C, n, m); CUDA_matrixMult(Ad,Bd,Cd,n,m); cudaEventRecord(stop_timer, 0); /* transfer result matrix back from device to host memory and deallocate device matrices */ cudaMemcpy(C,Cd, size, cudaMemcpyDeviceToHost); checkCUDAError("memory of C not transferred back"); cudaFree(Ad); cudaFree(Bd); cudaFree(Cd); #ifdef OUTPUT printMatrix("C", C, n); #endif /* deallocate host matrices, print results */ free(A); free(B); free(C); cudaEventSynchronize(stop_timer); cudaEventElapsedTime(&gpu_time, start_timer, stop_timer); printf("Elapsed time : %.3f s \n", gpu_time / 1000.0f); printf("Performance : %.0f MFlop/s \n", float(m) * (2.0f * n - 1.0f) * n * n / (gpu_time / 1000.0f * 1024.f * 1024.f)); cudaEventDestroy(start_timer); cudaEventDestroy(stop_timer); return(0); } /* set Matrix values to zero */ void 
zeroMatrix(float *A, int n) { int i,k; for (i=0; i<n; i++) for (k=0; k<n; k++) A[i*n+k] = 0; } /* initialise Matrix: discrete Sine Transform */ void dstMatrix(float *A, int n) { int i,k; for (i=0; i<n; i++) for (k=0; k<n; k++) A[i*n+k] = sin( ((i+1)*(k+1)*M_PI)/(n+1)); } /* * matrix multiplication C += A*B * -> standard C implementation */ void CPU_matrixMult(float *A, float *B, float *C, int n, int repeats) { int i,j,k; float tmp; for(int r=0; r<repeats; r++) { for (i=0; i<n; i++) { for (j=0; j<n; j++) { tmp = A[i*n+j]; for (k=0; k<n; k++) { C[i*n+k] += tmp * B[j*n+k]; } } } } } /* * matrix multiplication C += A*B * -> CUDA implementation: kernel invocation * (implementation adopted from Kirk&Hwu: * "Programming Massively Parallel Processors, chapter 3) */ __host__ void CUDA_matrixMult(float *Ad, float *Bd, float *Cd, int n, int repeats) { dim3 dimBlock(TILE_SIZE,TILE_SIZE); dim3 dimGrid(n/TILE_SIZE,n/TILE_SIZE); for(int i=0; i<repeats; i++) { // matrixMultKernel_global<<<dimGrid,dimBlock>>>(Ad,Bd,Cd,n); // matrixMultKernel_tiled<<<dimGrid,dimBlock>>>(Ad,Bd,Cd,n); // matrixMultKernel_coalesced<<<dimGrid,dimBlock>>>(Ad,Bd,Cd,n); matrixMultKernel_overlap<<<dimGrid,dimBlock>>>(Ad,Bd,Cd,n); } checkCUDAError("matrix multiplication kernel failed"); } /* print Matrix */ void printMatrix(const char* name, float *A, int n) { int i,k; printf("Matrix %s (size %d)\n",name,n); for (i=0; i<n; i++) { for (k=0; k<n; k++) { printf("%f ", A[i*n+k]); } printf("\n"); } } /* * helper function to check for errors in CUDA calls * source: NVIDIA */ void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "\nCuda error (%s): %s.\n", msg, cudaGetErrorString( err) ); exit(-1); } } #ifdef QUERY_DEVICES // Print device info void printDeviceInfo(cudaDeviceProp devProp) { printf("Revision number: %d.%d\n", devProp.major, devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %lu MB\n", devProp.totalGlobalMem / (1024 * 1024)); printf("Total shared memory per block: %lu kB\n", devProp.sharedMemPerBlock / 1024); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %lu MB\n", devProp.memPitch / (1024 * 1024)); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); printf("Maximum dimensions of block: %d %d %d\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]); printf("Maximum dimensions of grid: %d %d %d\n", devProp.maxGridSize[0], devProp.maxGridSize[1], devProp.maxGridSize[2]); printf("Clock rate: %d MHz\n", devProp.clockRate / 1000); printf("Total constant memory: %lu kB\n", devProp.totalConstMem / 1024); printf("Texture alignment: %lu B\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); printf("\n"); } #endif
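The kernels themselves (matrixMultKernel_global, _tiled, _coalesced and _overlap) come from cuda_mmult_kernels.h, which is not included in this listing, so the sketch below is only a guess at what the tiled variant might look like under the conventions visible in the host code: row-major storage, C += A*B, n a multiple of TILE_SIZE, and the dimGrid/dimBlock shape used by CUDA_matrixMult. TILE_SIZE = 16 is assumed here; the real value is defined in the header.

// Shared-memory tiled matrix multiply sketch, launched with
// dimGrid(n/TILE_SIZE, n/TILE_SIZE) and dimBlock(TILE_SIZE, TILE_SIZE).
#define TILE_SIZE 16   // assumed; the actual value comes from cuda_mmult_kernels.h

__global__ void matrixMultKernel_tiled_sketch(const float* A, const float* B,
                                              float* C, int n) {
    __shared__ float As[TILE_SIZE][TILE_SIZE];
    __shared__ float Bs[TILE_SIZE][TILE_SIZE];

    int row = blockIdx.y * TILE_SIZE + threadIdx.y;
    int col = blockIdx.x * TILE_SIZE + threadIdx.x;
    float acc = 0.0f;

    for (int t = 0; t < n / TILE_SIZE; t++) {
        As[threadIdx.y][threadIdx.x] = A[row * n + t * TILE_SIZE + threadIdx.x];
        Bs[threadIdx.y][threadIdx.x] = B[(t * TILE_SIZE + threadIdx.y) * n + col];
        __syncthreads();
        for (int k = 0; k < TILE_SIZE; k++)
            acc += As[threadIdx.y][k] * Bs[k][threadIdx.x];
        __syncthreads();
    }
    C[row * n + col] += acc;   // accumulate, matching the CPU reference C += A*B
}

Staging each TILE_SIZE x TILE_SIZE tile of A and B in shared memory means every global element is loaded TILE_SIZE times less often than in the naive kernel.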
dbfaa40ed15e1bdfe3cad04c8ffb735c76480907.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include "calculation.h" #define NB (4) #define NBb (16) #define NK (4) #define NR (10) #define FILESIZE (16*128*13*16*512) __device__ void gpuSubBytes(int *); __device__ void gpuShiftRows(int *); __device__ int gpumul(int,int); __device__ int gpudataget(void*, int); __device__ void gpuMixColumns(); __device__ void gpuAddRoundKey(int *, int *, int); //__device__ void PrintPlainText(unsigned char *); __device__ void PrintPlainText(int *); __device__ void gpudatadump(const char *, void *, int); __device__ void gpuMixColumns(int *); __device__ void gpuCipher(int *, int *); __global__ void device_aes_encrypt(unsigned char *pt, int *rkey, unsigned char *ct, long int size){ int Sbox[256] = { 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76, 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0, 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15, 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75, 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84, 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf, 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8, 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2, 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73, 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb, 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79, 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08, 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a, 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e, 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf, 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 }; //This kernel executes AES encryption on a GPU. //Please modify this kernel!! int data[NB]; int thread_id = blockDim.x * blockIdx.x + threadIdx.x; memcpy(data, pt+16*thread_id, NBb); //With NB, 16 bytes are defined as 4 words. 
if(thread_id == 0){ //printf("size = %ld\n", size); //PrintPlainText(data); // printf("----ViewRoundKey----\n"); // for (int j = 0; j < 44; j++){ // printf("%x\n", rkey[j]); // } // gpuAddRoundKey(data, rkey, 0); // printf("----addRoundKey[0]----\n"); // PrintPlainText(data); // for (int rnd = 0; rnd < 10; rnd++){ // // SubBytes // unsigned char *cb = (unsigned char*)data; // for(int i=0; i<NBb; i+=4){ // for(int j=1; j<4; j++){ // cb[i+j] = Sbox[cb[i+j]]; // } // } // printf("----SubBytes----\n"); // PrintPlainText(data); //printf("----ShiftRows----\n"); gpuShiftRows(data); //PrintPlainText(data); // printf("----MixColumns----\n"); // gpuMixColumns(data); // PrintPlainText(data); // printf("----AddRoundkey[%d]----\n", rnd); // gpuAddRoundKey(data, rkey, rnd); // PrintPlainText(data); // printf("--------\n"); // PrintPlainText(data); // } // printf("----SubBytes----\n"); // PrintPlainText(data); // printf("----ShiftRows----\n"); // gpuShiftRows(data); // PrintPlainText(data); // printf("----AddRoundkey[10]----\n"); // gpuAddRoundKey(data, rkey, 10); // printf("----FINISH----\n"); // PrintPlainText(data); //for (int k = 0; k < 4; k++){ // printf("%02x", data[k]); //} memcpy(ct+16*thread_id, data, NBb); //gpudatadump("Ciphertext : ", ct, 4); } // } // printf("You can use printf function to eliminate bugs in your kernel.\n"); // printf("This thread ID is %d.\n", thread_id); //... } __device__ void gpuSubBytes(int *state){ int i, j; unsigned char *cb=(unsigned char*)state; for(i=0; i<NBb; i+=4){ for(j=1; j<4; j++){ //cb[i+j] = Sbox[cb[i+j]]; } } } __device__ void gpuShiftRows(int *state){ int i, j, i4; unsigned char *cb = (unsigned char*)state; unsigned char cw[NBb]; memcpy(cw, cb, sizeof(cw)); for(i = 0;i < NB; i+=4){ i4 = i*4; for(j = 1; j < 4; j++){ cw[i4+j+0*4] = cb[i4+j+((j+0)&3)*4]; cw[i4+j+1*4] = cb[i4+j+((j+1)&3)*4]; cw[i4+j+2*4] = cb[i4+j+((j+2)&3)*4]; cw[i4+j+3*4] = cb[i4+j+((j+3)&3)*4]; } } memcpy(cb,cw,sizeof(cw)); } __device__ int gpumul(int dt,int n){ int i, x = 0; for(i = 8; i > 0; i >>= 1) { x <<= 1; if(x & 0x100) x = (x ^ 0x1b) & 0xff; if((n & i)) x ^= dt; } return(x); } __device__ int gpudataget(void* data, int n){ return(((unsigned char*)data)[n]); } __device__ void gpuMixColumns(int *state){ int i, i4, x; for(i = 0; i< NB; i++){ i4 = i*4; x = gpumul(gpudataget(state,i4+0),2) ^ gpumul(gpudataget(state,i4+1),3) ^ gpumul(gpudataget(state,i4+2),1) ^ gpumul(gpudataget(state,i4+3),1); x |= (gpumul(gpudataget(state,i4+1),2) ^ gpumul(gpudataget(state,i4+2),3) ^ gpumul(gpudataget(state,i4+3),1) ^ gpumul(gpudataget(state,i4+0),1)) << 8; x |= (gpumul(gpudataget(state,i4+2),2) ^ gpumul(gpudataget(state,i4+3),3) ^ gpumul(gpudataget(state,i4+0),1) ^ gpumul(gpudataget(state,i4+1),1)) << 16; x |= (gpumul(gpudataget(state,i4+3),2) ^ gpumul(gpudataget(state,i4+0),3) ^ gpumul(gpudataget(state,i4+1),1) ^ gpumul(gpudataget(state,i4+2),1)) << 24; state[i] = x; } } __device__ void gpuAddRoundKey(int *state, int *w, int n){ int i; for(i = 0; i < NB; i++) { state[i] ^= w[i + NB * n]; } } //__device__ void PrintPlainText(unsigned char *state){ __device__ void PrintPlainText(int *state){ int i; unsigned char *cdt = (unsigned char *)state; for (i = 0; i < 16; i++) { printf("%02x", cdt[i]); } printf("\n"); } __device__ void gpudatadump(const char *c, void *dt, int len){ int i; unsigned char *cdt = (unsigned char *)dt; printf("%s", c); for(i = 0; i < len*4;i++){ printf("%02x", cdt[i]); } printf("\n"); } __device__ void gpuCipher(int *state, int *rkey){ int rnd; //int i; gpuAddRoundKey(state, rkey, 0); 
for(rnd = 1; rnd < NR; rnd++){ gpuSubBytes(state); gpuShiftRows(state); gpuMixColumns(state); gpuAddRoundKey(state, rkey, rnd); } gpuSubBytes(state); gpuShiftRows(state); gpuAddRoundKey(state, rkey, rnd); //return 0; } void launch_aes_kernel(unsigned char *pt, int *rk, unsigned char *ct, long int size){ //This function launches the AES kernel. //Please modify this function for AES kernel. //In this function, you need to allocate the device memory and so on. unsigned char *d_pt, *d_ct; int *d_rkey; dim3 dim_grid(FILESIZE/16/512,1,1), dim_block(512,1,1); hipMalloc((void **)&d_pt, sizeof(unsigned char)*size); hipMalloc((void **)&d_rkey, sizeof(int)*44); hipMalloc((void **)&d_ct, sizeof(unsigned char)*size); //hipMemset(d_pt, 0, sizeof(unsigned char)*size); hipMemcpy(d_pt, pt, sizeof(unsigned char)*size, hipMemcpyHostToDevice); hipMemcpy(d_rkey, rk, sizeof(int)*44, hipMemcpyHostToDevice); hipLaunchKernelGGL(( device_aes_encrypt), dim3(dim_grid), dim3(dim_block), 0, 0, d_pt, d_rkey, d_ct, size); hipMemcpy(ct, d_ct, sizeof(unsigned char)*size, hipMemcpyDeviceToHost); hipFree(d_pt); hipFree(d_rkey); hipFree(d_ct); }
dbfaa40ed15e1bdfe3cad04c8ffb735c76480907.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include "calculation.h" #define NB (4) #define NBb (16) #define NK (4) #define NR (10) #define FILESIZE (16*128*13*16*512) __device__ void gpuSubBytes(int *); __device__ void gpuShiftRows(int *); __device__ int gpumul(int,int); __device__ int gpudataget(void*, int); __device__ void gpuMixColumns(); __device__ void gpuAddRoundKey(int *, int *, int); //__device__ void PrintPlainText(unsigned char *); __device__ void PrintPlainText(int *); __device__ void gpudatadump(const char *, void *, int); __device__ void gpuMixColumns(int *); __device__ void gpuCipher(int *, int *); __global__ void device_aes_encrypt(unsigned char *pt, int *rkey, unsigned char *ct, long int size){ int Sbox[256] = { 0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76, 0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0, 0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15, 0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75, 0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84, 0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf, 0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8, 0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2, 0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73, 0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb, 0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79, 0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08, 0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a, 0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e, 0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf, 0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 }; //This kernel executes AES encryption on a GPU. //Please modify this kernel!! int data[NB]; int thread_id = blockDim.x * blockIdx.x + threadIdx.x; memcpy(data, pt+16*thread_id, NBb); //With NB, 16 bytes are defined as 4 words. 
if(thread_id == 0){ //printf("size = %ld\n", size); //PrintPlainText(data); // printf("----ViewRoundKey----\n"); // for (int j = 0; j < 44; j++){ // printf("%x\n", rkey[j]); // } // gpuAddRoundKey(data, rkey, 0); // printf("----addRoundKey[0]----\n"); // PrintPlainText(data); // for (int rnd = 0; rnd < 10; rnd++){ // // SubBytes // unsigned char *cb = (unsigned char*)data; // for(int i=0; i<NBb; i+=4){ // for(int j=1; j<4; j++){ // cb[i+j] = Sbox[cb[i+j]]; // } // } // printf("----SubBytes----\n"); // PrintPlainText(data); //printf("----ShiftRows----\n"); gpuShiftRows(data); //PrintPlainText(data); // printf("----MixColumns----\n"); // gpuMixColumns(data); // PrintPlainText(data); // printf("----AddRoundkey[%d]----\n", rnd); // gpuAddRoundKey(data, rkey, rnd); // PrintPlainText(data); // printf("--------\n"); // PrintPlainText(data); // } // printf("----SubBytes----\n"); // PrintPlainText(data); // printf("----ShiftRows----\n"); // gpuShiftRows(data); // PrintPlainText(data); // printf("----AddRoundkey[10]----\n"); // gpuAddRoundKey(data, rkey, 10); // printf("----FINISH----\n"); // PrintPlainText(data); //for (int k = 0; k < 4; k++){ // printf("%02x", data[k]); //} memcpy(ct+16*thread_id, data, NBb); //gpudatadump("Ciphertext : ", ct, 4); } // } // printf("You can use printf function to eliminate bugs in your kernel.\n"); // printf("This thread ID is %d.\n", thread_id); //... } __device__ void gpuSubBytes(int *state){ int i, j; unsigned char *cb=(unsigned char*)state; for(i=0; i<NBb; i+=4){ for(j=1; j<4; j++){ //cb[i+j] = Sbox[cb[i+j]]; } } } __device__ void gpuShiftRows(int *state){ int i, j, i4; unsigned char *cb = (unsigned char*)state; unsigned char cw[NBb]; memcpy(cw, cb, sizeof(cw)); for(i = 0;i < NB; i+=4){ i4 = i*4; for(j = 1; j < 4; j++){ cw[i4+j+0*4] = cb[i4+j+((j+0)&3)*4]; cw[i4+j+1*4] = cb[i4+j+((j+1)&3)*4]; cw[i4+j+2*4] = cb[i4+j+((j+2)&3)*4]; cw[i4+j+3*4] = cb[i4+j+((j+3)&3)*4]; } } memcpy(cb,cw,sizeof(cw)); } __device__ int gpumul(int dt,int n){ int i, x = 0; for(i = 8; i > 0; i >>= 1) { x <<= 1; if(x & 0x100) x = (x ^ 0x1b) & 0xff; if((n & i)) x ^= dt; } return(x); } __device__ int gpudataget(void* data, int n){ return(((unsigned char*)data)[n]); } __device__ void gpuMixColumns(int *state){ int i, i4, x; for(i = 0; i< NB; i++){ i4 = i*4; x = gpumul(gpudataget(state,i4+0),2) ^ gpumul(gpudataget(state,i4+1),3) ^ gpumul(gpudataget(state,i4+2),1) ^ gpumul(gpudataget(state,i4+3),1); x |= (gpumul(gpudataget(state,i4+1),2) ^ gpumul(gpudataget(state,i4+2),3) ^ gpumul(gpudataget(state,i4+3),1) ^ gpumul(gpudataget(state,i4+0),1)) << 8; x |= (gpumul(gpudataget(state,i4+2),2) ^ gpumul(gpudataget(state,i4+3),3) ^ gpumul(gpudataget(state,i4+0),1) ^ gpumul(gpudataget(state,i4+1),1)) << 16; x |= (gpumul(gpudataget(state,i4+3),2) ^ gpumul(gpudataget(state,i4+0),3) ^ gpumul(gpudataget(state,i4+1),1) ^ gpumul(gpudataget(state,i4+2),1)) << 24; state[i] = x; } } __device__ void gpuAddRoundKey(int *state, int *w, int n){ int i; for(i = 0; i < NB; i++) { state[i] ^= w[i + NB * n]; } } //__device__ void PrintPlainText(unsigned char *state){ __device__ void PrintPlainText(int *state){ int i; unsigned char *cdt = (unsigned char *)state; for (i = 0; i < 16; i++) { printf("%02x", cdt[i]); } printf("\n"); } __device__ void gpudatadump(const char *c, void *dt, int len){ int i; unsigned char *cdt = (unsigned char *)dt; printf("%s", c); for(i = 0; i < len*4;i++){ printf("%02x", cdt[i]); } printf("\n"); } __device__ void gpuCipher(int *state, int *rkey){ int rnd; //int i; gpuAddRoundKey(state, rkey, 0); 
for(rnd = 1; rnd < NR; rnd++){ gpuSubBytes(state); gpuShiftRows(state); gpuMixColumns(state); gpuAddRoundKey(state, rkey, rnd); } gpuSubBytes(state); gpuShiftRows(state); gpuAddRoundKey(state, rkey, rnd); //return 0; } void launch_aes_kernel(unsigned char *pt, int *rk, unsigned char *ct, long int size){ //This function launches the AES kernel. //Please modify this function for AES kernel. //In this function, you need to allocate the device memory and so on. unsigned char *d_pt, *d_ct; int *d_rkey; dim3 dim_grid(FILESIZE/16/512,1,1), dim_block(512,1,1); cudaMalloc((void **)&d_pt, sizeof(unsigned char)*size); cudaMalloc((void **)&d_rkey, sizeof(int)*44); cudaMalloc((void **)&d_ct, sizeof(unsigned char)*size); //cudaMemset(d_pt, 0, sizeof(unsigned char)*size); cudaMemcpy(d_pt, pt, sizeof(unsigned char)*size, cudaMemcpyHostToDevice); cudaMemcpy(d_rkey, rk, sizeof(int)*44, cudaMemcpyHostToDevice); device_aes_encrypt<<<dim_grid, dim_block>>>(d_pt, d_rkey, d_ct, size); cudaMemcpy(ct, d_ct, sizeof(unsigned char)*size, cudaMemcpyDeviceToHost); cudaFree(d_pt); cudaFree(d_rkey); cudaFree(d_ct); }
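The kernel in this pair is deliberately a scaffold: only thread 0 touches its block, the round loop is commented out, and gpuSubBytes has its S-box lookup disabled because the table lives inside the original kernel. Since gpuCipher already wires up the full AddRoundKey/SubBytes/ShiftRows/MixColumns schedule, a completed per-thread body could look roughly like the sketch below (a hypothetical completion, not the assignment's reference solution; the S-box would still have to be made visible to gpuSubBytes, e.g. via __constant__ memory, before the output is correct AES).

// Hypothetical completed kernel body: one 16-byte block per thread.
__global__ void device_aes_encrypt_sketch(unsigned char *pt, int *rkey,
                                          unsigned char *ct, long int size)
{
    int data[NB];                                           // 4-word (16-byte) state
    long int thread_id = (long int)blockDim.x * blockIdx.x + threadIdx.x;
    if (16 * thread_id + NBb > size) return;                // guard the buffer tail
    memcpy(data, pt + 16 * thread_id, NBb);                 // load plaintext block
    gpuCipher(data, rkey);                                  // initial AddRoundKey + 10 rounds
    memcpy(ct + 16 * thread_id, data, NBb);                 // store ciphertext block
}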
b45d9ad3cb7efc0e600702c6f8c3e9950480b48b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--gridDim=[12,24] --blockDim=[32,16] #define k_blockDimX 32 #define k_blockDimMaxY 16 // Note: If you change the RADIUS, you should also change the unrolling below #define RADIUS 4 __constant__ float stencil[RADIUS + 1]; __global__ void FiniteDifferencesKernel(float *output, const float *input, const int dimx, const int dimy, const int dimz) { __requires(dimx == 96); __requires(dimy == 96); __requires(dimz == 96); bool validr = true; bool validw = true; const int gtidx = blockIdx.x * blockDim.x + threadIdx.x; const int gtidy = blockIdx.y * blockDim.y + threadIdx.y; const int ltidx = threadIdx.x; const int ltidy = threadIdx.y; const int workx = blockDim.x; const int worky = blockDim.y; __shared__ float tile[k_blockDimMaxY + 2 * RADIUS][k_blockDimX + 2 * RADIUS]; const int stride_y = dimx + 2 * RADIUS; const int stride_z = stride_y * (dimy + 2 * RADIUS); int inputIndex = 0; int outputIndex = 0; // Advance inputIndex to start of inner volume inputIndex += RADIUS * stride_y + RADIUS; // Advance inputIndex to target element inputIndex += gtidy * stride_y + gtidx; float infront[RADIUS]; float behind[RADIUS]; float current; const int tx = ltidx + RADIUS; const int ty = ltidy + RADIUS; // Check in bounds if ((gtidx >= dimx + RADIUS) || (gtidy >= dimy + RADIUS)) validr = false; if ((gtidx >= dimx) || (gtidy >= dimy)) validw = false; // Preload the "infront" and "behind" data for (int i = RADIUS - 2 ; __invariant((inputIndex - ((RADIUS + gtidy) * stride_y + RADIUS + gtidx)) == stride_z * ((RADIUS - 2) - i)), __invariant(i >= -1), i >= 0 ; i--) { if (validr) behind[i] = input[inputIndex]; inputIndex += stride_z; } if (validr) current = input[inputIndex]; outputIndex = inputIndex; inputIndex += stride_z; for (int i = 0 ; i < RADIUS ; i++) { if (validr) infront[i] = input[inputIndex]; inputIndex += stride_z; } // Step through the xy-planes #pragma unroll 9 for (int iz = 0 ; __invariant((outputIndex - ((RADIUS + gtidy) * stride_y + RADIUS + gtidx)) == stride_z * (3 + iz)), __invariant(__write_implies(output, (__write_offset_bytes(output)/sizeof(float) - ((RADIUS + gtidy) * stride_y + RADIUS + gtidx))%stride_z == 0)), __invariant(__implies(gtidx >= dimx, !__write(output))), __invariant(__implies(gtidy >= dimy, !__write(output))), iz < dimz ; iz++) { // Advance the slice (move the thread-front) for (int i = RADIUS - 1 ; i > 0 ; i--) behind[i] = behind[i - 1]; behind[0] = current; current = infront[0]; #pragma unroll 4 for (int i = 0 ; i < RADIUS - 1 ; i++) infront[i] = infront[i + 1]; if (validr) infront[RADIUS - 1] = input[inputIndex]; inputIndex += stride_z; outputIndex += stride_z; __syncthreads(); // Note that for the work items on the boundary of the problem, the // supplied index when reading the halo (below) may wrap to the // previous/next row or even the previous/next xy-plane. This is // acceptable since a) we disable the output write for these work // items and b) there is at least one xy-plane before/after the // current plane, so the access will be within bounds. 
// Update the data slice in the local tile // Halo above & below if (ltidy < RADIUS) { tile[ltidy][tx] = input[outputIndex - RADIUS * stride_y]; tile[ltidy + worky + RADIUS][tx] = input[outputIndex + worky * stride_y]; } // Halo left & right if (ltidx < RADIUS) { tile[ty][ltidx] = input[outputIndex - RADIUS]; tile[ty][ltidx + workx + RADIUS] = input[outputIndex + workx]; } tile[ty][tx] = current; __syncthreads(); // Compute the output value float value = stencil[0] * current; #pragma unroll 4 for (int i = 1 ; i <= RADIUS ; i++) { value += stencil[i] * (infront[i-1] + behind[i-1] + tile[ty - i][tx] + tile[ty + i][tx] + tile[ty][tx - i] + tile[ty][tx + i]); } // Store the output value if (validw) output[outputIndex] = value; } }
b45d9ad3cb7efc0e600702c6f8c3e9950480b48b.cu
//pass //--gridDim=[12,24] --blockDim=[32,16] #define k_blockDimX 32 #define k_blockDimMaxY 16 // Note: If you change the RADIUS, you should also change the unrolling below #define RADIUS 4 __constant__ float stencil[RADIUS + 1]; __global__ void FiniteDifferencesKernel(float *output, const float *input, const int dimx, const int dimy, const int dimz) { __requires(dimx == 96); __requires(dimy == 96); __requires(dimz == 96); bool validr = true; bool validw = true; const int gtidx = blockIdx.x * blockDim.x + threadIdx.x; const int gtidy = blockIdx.y * blockDim.y + threadIdx.y; const int ltidx = threadIdx.x; const int ltidy = threadIdx.y; const int workx = blockDim.x; const int worky = blockDim.y; __shared__ float tile[k_blockDimMaxY + 2 * RADIUS][k_blockDimX + 2 * RADIUS]; const int stride_y = dimx + 2 * RADIUS; const int stride_z = stride_y * (dimy + 2 * RADIUS); int inputIndex = 0; int outputIndex = 0; // Advance inputIndex to start of inner volume inputIndex += RADIUS * stride_y + RADIUS; // Advance inputIndex to target element inputIndex += gtidy * stride_y + gtidx; float infront[RADIUS]; float behind[RADIUS]; float current; const int tx = ltidx + RADIUS; const int ty = ltidy + RADIUS; // Check in bounds if ((gtidx >= dimx + RADIUS) || (gtidy >= dimy + RADIUS)) validr = false; if ((gtidx >= dimx) || (gtidy >= dimy)) validw = false; // Preload the "infront" and "behind" data for (int i = RADIUS - 2 ; __invariant((inputIndex - ((RADIUS + gtidy) * stride_y + RADIUS + gtidx)) == stride_z * ((RADIUS - 2) - i)), __invariant(i >= -1), i >= 0 ; i--) { if (validr) behind[i] = input[inputIndex]; inputIndex += stride_z; } if (validr) current = input[inputIndex]; outputIndex = inputIndex; inputIndex += stride_z; for (int i = 0 ; i < RADIUS ; i++) { if (validr) infront[i] = input[inputIndex]; inputIndex += stride_z; } // Step through the xy-planes #pragma unroll 9 for (int iz = 0 ; __invariant((outputIndex - ((RADIUS + gtidy) * stride_y + RADIUS + gtidx)) == stride_z * (3 + iz)), __invariant(__write_implies(output, (__write_offset_bytes(output)/sizeof(float) - ((RADIUS + gtidy) * stride_y + RADIUS + gtidx))%stride_z == 0)), __invariant(__implies(gtidx >= dimx, !__write(output))), __invariant(__implies(gtidy >= dimy, !__write(output))), iz < dimz ; iz++) { // Advance the slice (move the thread-front) for (int i = RADIUS - 1 ; i > 0 ; i--) behind[i] = behind[i - 1]; behind[0] = current; current = infront[0]; #pragma unroll 4 for (int i = 0 ; i < RADIUS - 1 ; i++) infront[i] = infront[i + 1]; if (validr) infront[RADIUS - 1] = input[inputIndex]; inputIndex += stride_z; outputIndex += stride_z; __syncthreads(); // Note that for the work items on the boundary of the problem, the // supplied index when reading the halo (below) may wrap to the // previous/next row or even the previous/next xy-plane. This is // acceptable since a) we disable the output write for these work // items and b) there is at least one xy-plane before/after the // current plane, so the access will be within bounds. 
// Update the data slice in the local tile // Halo above & below if (ltidy < RADIUS) { tile[ltidy][tx] = input[outputIndex - RADIUS * stride_y]; tile[ltidy + worky + RADIUS][tx] = input[outputIndex + worky * stride_y]; } // Halo left & right if (ltidx < RADIUS) { tile[ty][ltidx] = input[outputIndex - RADIUS]; tile[ty][ltidx + workx + RADIUS] = input[outputIndex + workx]; } tile[ty][tx] = current; __syncthreads(); // Compute the output value float value = stencil[0] * current; #pragma unroll 4 for (int i = 1 ; i <= RADIUS ; i++) { value += stencil[i] * (infront[i-1] + behind[i-1] + tile[ty - i][tx] + tile[ty + i][tx] + tile[ty][tx - i] + tile[ty][tx + i]); } // Store the output value if (validw) output[outputIndex] = value; } }
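The pair above is a GPUVerify benchmark: the --gridDim/--blockDim comment and the __requires/__invariant calls are verifier annotations rather than runtime code, so the kernel as written is meant for static analysis. For readers who want to see how such a stencil is driven, a minimal host-side launch consistent with the kernel's indexing might look like the following; the wrapper name, grid math, and padding note are assumptions, not part of the original file.

// Hypothetical launcher (illustrative only).  The kernel indexes a volume whose
// xy-planes are padded by RADIUS cells on each side (stride_y = dimx + 2*RADIUS,
// stride_z = stride_y * (dimy + 2*RADIUS)); the z extent is assumed padded likewise.
void launch_stencil_sketch(float *d_output, const float *d_input,
                           int dimx, int dimy, int dimz)
{
    dim3 block(k_blockDimX, k_blockDimMaxY);                 // 32 x 16 threads
    dim3 grid((dimx + block.x - 1) / block.x,                // cover the inner volume in x
              (dimy + block.y - 1) / block.y);               // and in y; z is walked in-kernel
    FiniteDifferencesKernel<<<grid, block>>>(d_output, d_input, dimx, dimy, dimz);
}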
24117684e850b9abc3035e2fe37aa21ec2296cbf.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright (c) 2019 by Contributors * \file kernel/cuda/binary_reduce_prod.cu * \brief CUDA kernels for binary reduce prod */ #include "binary_reduce_impl_hip.cuh" #include "./backward_binary_reduce_impl.cuh" namespace dgl { namespace kernel { #define REDUCER ReduceProd #define XPU kDLGPU #define IDX int32_t EVAL(GEN_DTYPE, GEN_OP_TARGET, GEN_DEFINE) EVAL(GEN_BACKWARD_MODE, GEN_DTYPE, GEN_OP_TARGET, GEN_BACKWARD_DEFINE) } // namespace kernel } // namespace dgl
24117684e850b9abc3035e2fe37aa21ec2296cbf.cu
/*! * Copyright (c) 2019 by Contributors * \file kernel/cuda/binary_reduce_prod.cu * \brief CUDA kernels for binary reduce prod */ #include "./binary_reduce_impl.cuh" #include "./backward_binary_reduce_impl.cuh" namespace dgl { namespace kernel { #define REDUCER ReduceProd #define XPU kDLGPU #define IDX int32_t EVAL(GEN_DTYPE, GEN_OP_TARGET, GEN_DEFINE) EVAL(GEN_BACKWARD_MODE, GEN_DTYPE, GEN_OP_TARGET, GEN_BACKWARD_DEFINE) } // namespace kernel } // namespace dgl
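This pair is a thin instantiation stub: it fixes the reducer, device type, and index type as macros and lets EVAL expand generator macros from the included .cuh headers; the hipify pass only rewrote the include path. As a generic illustration of this macro-instantiation pattern (hypothetical macro and kernel names, not DGL's actual GEN_* definitions), the idea is:

// Hypothetical illustration of macro-driven kernel instantiation (not DGL code).
// A generator macro stamps out one specialised kernel per (reducer, dtype) pair,
// so a translation unit only has to name the combination it wants compiled.
#define GEN_REDUCE_KERNEL(DTYPE, NAME, INIT, OP)                              \
  __global__ void Reduce_##NAME##_##DTYPE(const DTYPE *in, DTYPE *out, int n) \
  {                                                                           \
    DTYPE acc = INIT;                                                         \
    for (int i = 0; i < n; ++i) acc = acc OP in[i]; /* deliberately naive */  \
    if (blockIdx.x == 0 && threadIdx.x == 0) out[0] = acc;                    \
  }

GEN_REDUCE_KERNEL(float, Prod, 1.0f, *)    // expands to Reduce_Prod_float
GEN_REDUCE_KERNEL(double, Prod, 1.0, *)    // expands to Reduce_Prod_double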
ff922d440979b1f487caf7a789a544185cbe963b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "operators/utils/proposal_op.h" #include "utils/cuda_device.h" namespace dragon { template <typename Dtype> __device__ static int transform_box(Dtype box[], const Dtype dx, const Dtype dy, const Dtype d_log_w, const Dtype d_log_h, const Dtype img_W, const Dtype img_H, const Dtype min_box_W, const Dtype min_box_H) { // width & height of box const Dtype w = box[2] - box[0] + (Dtype)1; const Dtype h = box[3] - box[1] + (Dtype)1; // center location of box const Dtype ctr_x = box[0] + (Dtype)0.5 * w; const Dtype ctr_y = box[1] + (Dtype)0.5 * h; // new center location according to gradient (dx, dy) const Dtype pred_ctr_x = dx * w + ctr_x; const Dtype pred_ctr_y = dy * h + ctr_y; // new width & height according to gradient d(log w), d(log h) const Dtype pred_w = exp(d_log_w) * w; const Dtype pred_h = exp(d_log_h) * h; // update upper-left corner location box[0] = pred_ctr_x - (Dtype)0.5 * pred_w; box[1] = pred_ctr_y - (Dtype)0.5 * pred_h; // update lower-right corner location box[2] = pred_ctr_x + (Dtype)0.5 * pred_w; box[3] = pred_ctr_y + (Dtype)0.5 * pred_h; // adjust new corner locations to be within the image region, box[0] = max((Dtype)0, min(box[0], img_W - (Dtype)1)); box[1] = max((Dtype)0, min(box[1], img_H - (Dtype)1)); box[2] = max((Dtype)0, min(box[2], img_W - (Dtype)1)); box[3] = max((Dtype)0, min(box[3], img_H - (Dtype)1)); // recompute new width & height const Dtype box_w = box[2] - box[0] + (Dtype)1; const Dtype box_h = box[3] - box[1] + (Dtype)1; // check if new box's size >= threshold return (box_w >= min_box_W) * (box_h >= min_box_H); } template <typename Dtype> static void sort_box(Dtype* list_cpu, const int start, const int end, const int num_top) { const Dtype pivot_score = list_cpu[start * 5 + 4]; int left = start + 1, right = end; Dtype temp[5]; while (left <= right) { while (left <= end && list_cpu[left * 5 + 4] >= pivot_score) ++left; while (right > start && list_cpu[right * 5 + 4] <= pivot_score) --right; if (left <= right) { for (int i = 0; i < 5; ++i) { temp[i] = list_cpu[left * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[left * 5 + i] = list_cpu[right * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[right * 5 + i] = temp[i]; } ++left; --right; } } if (right > start) { for (int i = 0; i < 5; ++i) { temp[i] = list_cpu[start * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[start * 5 + i] = list_cpu[right * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[right * 5 + i] = temp[i]; } } if (start < right - 1) { sort_box(list_cpu, start, right - 1, num_top); } if (right + 1 < num_top && right + 1 < end) { sort_box(list_cpu, right + 1, end, num_top); } } template <typename Dtype> __global__ static void enumerate_proposals_gpu(const int nthreads, const Dtype bottom4d[], const Dtype d_anchor4d[], const Dtype anchors[], Dtype proposals[], const int num_anchors, const int bottom_H, const int bottom_W, const Dtype img_H, const Dtype img_W, const Dtype min_box_H, const Dtype min_box_W, const int feat_stride) { CUDA_KERNEL_LOOP(index, nthreads) { const int h = index / num_anchors / bottom_W; const int w = (index / num_anchors) % bottom_W; const int k = index % num_anchors; const Dtype x = w * feat_stride; const Dtype y = h * feat_stride; const Dtype* p_box = d_anchor4d + h * bottom_W + w; const Dtype* p_score = bottom4d + h * bottom_W + w; const int bottom_area = bottom_H * bottom_W; const Dtype dx = p_box[(k * 4 + 0) * bottom_area]; const Dtype dy = p_box[(k * 4 + 1) * 
bottom_area]; const Dtype d_log_w = p_box[(k * 4 + 2) * bottom_area]; const Dtype d_log_h = p_box[(k * 4 + 3) * bottom_area]; Dtype* const p_proposal = proposals + index * 5; p_proposal[0] = x + anchors[k * 4 + 0]; p_proposal[1] = y + anchors[k * 4 + 1]; p_proposal[2] = x + anchors[k * 4 + 2]; p_proposal[3] = y + anchors[k * 4 + 3]; p_proposal[4] = transform_box(p_proposal, dx, dy, d_log_w, d_log_h, img_W, img_H, min_box_W, min_box_H) * p_score[k * bottom_area]; } } template <typename Dtype> __global__ static void retrieve_rois_gpu(const int nthreads, const int item_index, const Dtype proposals[], const int roi_indices[], Dtype rois[], Dtype roi_scores[]) { CUDA_KERNEL_LOOP(index, nthreads) { const Dtype* const proposals_index = proposals + roi_indices[index] * 5; rois[index * 5 + 0] = item_index; rois[index * 5 + 1] = proposals_index[0]; rois[index * 5 + 2] = proposals_index[1]; rois[index * 5 + 3] = proposals_index[2]; rois[index * 5 + 4] = proposals_index[3]; if (roi_scores) { roi_scores[index] = proposals_index[4]; } } } template <typename Dtype> __device__ static Dtype iou(const Dtype A[], const Dtype B[]) { // overlapped region (= box) const Dtype x1 = max(A[0], B[0]); const Dtype y1 = max(A[1], B[1]); const Dtype x2 = min(A[2], B[2]); const Dtype y2 = min(A[3], B[3]); // intersection area const Dtype width = max((Dtype)0, x2 - x1 + (Dtype)1); const Dtype height = max((Dtype)0, y2 - y1 + (Dtype)1); const Dtype area = width * height; // area of A, B const Dtype A_area = (A[2] - A[0] + (Dtype)1) * (A[3] - A[1] + (Dtype)1); const Dtype B_area = (B[2] - B[0] + (Dtype)1) * (B[3] - B[1] + (Dtype)1); // IoU return area / (A_area + B_area - area); } #define DIV_THEN_CEIL(x, y) (((x) + (y) - 1) / (y)) static const int nms_block_size = 64; template <typename Dtype> __global__ static void nms_mask(const Dtype boxes[], unsigned long long mask[], const int num_boxes, const Dtype nms_thresh) { // block region // j = j_start + { 0, ..., dj_end - 1 } // i = i_start + { 0, ..., di_end - 1 } const int i_start = blockIdx.x * nms_block_size; const int di_end = min(num_boxes - i_start, nms_block_size); const int j_start = blockIdx.y * nms_block_size; const int dj_end = min(num_boxes - j_start, nms_block_size); // copy all i-th boxes to GPU cache // i = i_start + { 0, ..., di_end - 1 } __shared__ Dtype boxes_i[nms_block_size * 4]; { const int di = threadIdx.x; if (di < di_end) { boxes_i[di * 4 + 0] = boxes[(i_start + di) * 5 + 0]; boxes_i[di * 4 + 1] = boxes[(i_start + di) * 5 + 1]; boxes_i[di * 4 + 2] = boxes[(i_start + di) * 5 + 2]; boxes_i[di * 4 + 3] = boxes[(i_start + di) * 5 + 3]; } } __syncthreads(); // given j = j_start + dj, // check whether box i is significantly overlapped with box j // (i.e., IoU(box j, box i) > threshold) // for all i = i_start + { 0, ..., di_end - 1 } except for i == j { const int dj = threadIdx.x; if (dj < dj_end) { // box j const Dtype* const box_j = boxes + (j_start + dj) * 5; // mask for significant overlap // if IoU(box j, box i) > threshold, di-th bit = 1 unsigned long long mask_j = 0; // check for all i = i_start + { 0, ..., di_end - 1 } // except for i == j const int di_start = (i_start == j_start) ? 
(dj + 1) : 0; for (int di = di_start; di < di_end; ++di) { // box i const Dtype* const box_i = boxes_i + di * 4; // if IoU(box j, box i) > threshold, di-th bit = 1 if (iou(box_j, box_i) > nms_thresh) { mask_j |= 1ULL << di; } } // mask: "num_boxes x num_blocks" array // for mask[j][bi], "di-th bit = 1" means: // box j is significantly overlapped with box i = i_start + di, // where i_start = bi * block_size { const int num_blocks = DIV_THEN_CEIL(num_boxes, nms_block_size); const int bi = blockIdx.x; mask[(j_start + dj) * num_blocks + bi] = mask_j; } } // endif dj < dj_end } } template <typename Dtype> void nms_gpu(const int num_boxes, const Dtype boxes_gpu[], Tensor* p_mask, int index_out_cpu[], int* const num_out, const int base_index, const Dtype nms_thresh, const int max_num_out) { const int num_blocks = DIV_THEN_CEIL(num_boxes, nms_block_size); { const dim3 blocks(num_blocks, num_blocks); vector<TIndex> mask_shape(2); mask_shape[0] = num_boxes; mask_shape[1] = num_blocks * sizeof(unsigned long long) / sizeof(int); p_mask->Reshape(mask_shape); // find all significantly-overlapped pairs of boxes nms_mask << <blocks, nms_block_size >> >( boxes_gpu, (unsigned long long*)p_mask->template mutable_data<int, CUDAContext>(), num_boxes, nms_thresh); CUDA_POST_KERNEL_CHECK; } // discard i-th box if it is significantly overlapped with // one or more previous (= scored higher) boxes { const unsigned long long* p_mask_cpu = (unsigned long long*)p_mask->mutable_data<int, CPUContext>(); int num_selected = 0; vector<unsigned long long> dead_bit(num_blocks); for (int i = 0; i < num_blocks; ++i) { dead_bit[i] = 0; } for (int i = 0; i < num_boxes; ++i) { const int nblock = i / nms_block_size; const int inblock = i % nms_block_size; if (!(dead_bit[nblock] & (1ULL << inblock))) { index_out_cpu[num_selected++] = base_index + i; const unsigned long long* const mask_i = p_mask_cpu + i * num_blocks; for (int j = nblock; j < num_blocks; ++j) { dead_bit[j] |= mask_i[j]; } if (num_selected == max_num_out) { break; } } } *num_out = num_selected; } } template void nms_gpu(const int num_boxes, const float boxes_gpu[], Tensor* p_mask, int index_out_cpu[], int* const num_out, const int base_index, const float nms_thresh, const int max_num_out); template void nms_gpu(const int num_boxes, const double boxes_gpu[], Tensor* p_mask, int index_out_cpu[], int* const num_out, const int base_index, const double nms_thresh, const int max_num_out); template <class Context> template <typename T> void ProposalOp<Context>::RunWithType() { auto* p_bottom_item = this->input(0).template data<T, CUDAContext>(); auto* p_d_anchor_item = this->input(1).template data<T, CUDAContext>(); auto* p_img_info_cpu = this->input(2).template data<T, CPUContext>(); auto* p_roi_item = this->output(0)->template mutable_data<T, CUDAContext>(); auto* p_score_item = (this->OutputSize() > 1) ? 
this->output(1)->template mutable_data<T, CUDAContext>() : NULL; vector<TIndex> proposals_shape(2), top_shape(2); proposals_shape[0] = 0; proposals_shape[1] = 5; top_shape[0] = 0; top_shape[1] = 5; for (int n = 0; n < this->input(0).dim(0); ++n) { // bottom shape: (2 x num_anchors) x H x W const int bottom_H = this->input(0).dim(2); const int bottom_W = this->input(0).dim(3); // input image height & width const T img_H = p_img_info_cpu[0]; const T img_W = p_img_info_cpu[1]; // scale factor for height & width const T scale_H = p_img_info_cpu[2]; const T scale_W = p_img_info_cpu[3]; // minimum box width & height const T min_box_H = min_size_ * scale_H; const T min_box_W = min_size_ * scale_W; // number of all proposals = num_anchors * H * W const int num_proposals = anchors_.dim(0) * bottom_H * bottom_W; // number of top-n proposals before NMS const int pre_nms_topn = ::min(num_proposals, pre_nms_topn_); // number of final RoIs int num_rois = 0; // enumerate all proposals // num_proposals = num_anchors * H * W // (x1, y1, x2, y2, score) for each proposal // NOTE: for bottom, only foreground scores are passed proposals_shape[0] = num_proposals; proposals_.Reshape(proposals_shape); enumerate_proposals_gpu<T> << <GET_BLOCKS(num_proposals), CUDA_NUM_THREADS >> >(num_proposals, p_bottom_item + num_proposals, p_d_anchor_item, anchors_.template data<T, CUDAContext>(), proposals_.template mutable_data<T, CUDAContext>(), anchors_.dim(0), bottom_H, bottom_W, img_H, img_W, min_box_H, min_box_W, feat_stride_); CUDA_POST_KERNEL_CHECK; sort_box<T>(proposals_.template mutable_data<T, CPUContext>(), 0, num_proposals - 1, pre_nms_topn_); nms_gpu<T>(pre_nms_topn, proposals_.template data<T, CUDAContext>(), &nms_mask_, roi_indices_.template mutable_data<int, CPUContext>(), &num_rois, 0, nms_thresh_, post_nms_topn_); retrieve_rois_gpu<T> << <GET_BLOCKS(num_rois), CUDA_NUM_THREADS >> >(num_rois, n, proposals_.template data<T, CUDAContext>(), roi_indices_.template data<int, CUDAContext>(), p_roi_item, p_score_item); CUDA_POST_KERNEL_CHECK; top_shape[0] += num_rois; } this->output(0)->Reshape(top_shape); if (this->OutputSize() > 1) { top_shape.pop_back(); this->output(1)->Reshape(top_shape); } } template void ProposalOp<CUDAContext>::RunWithType<float>(); }
ff922d440979b1f487caf7a789a544185cbe963b.cu
#include "operators/utils/proposal_op.h" #include "utils/cuda_device.h" namespace dragon { template <typename Dtype> __device__ static int transform_box(Dtype box[], const Dtype dx, const Dtype dy, const Dtype d_log_w, const Dtype d_log_h, const Dtype img_W, const Dtype img_H, const Dtype min_box_W, const Dtype min_box_H) { // width & height of box const Dtype w = box[2] - box[0] + (Dtype)1; const Dtype h = box[3] - box[1] + (Dtype)1; // center location of box const Dtype ctr_x = box[0] + (Dtype)0.5 * w; const Dtype ctr_y = box[1] + (Dtype)0.5 * h; // new center location according to gradient (dx, dy) const Dtype pred_ctr_x = dx * w + ctr_x; const Dtype pred_ctr_y = dy * h + ctr_y; // new width & height according to gradient d(log w), d(log h) const Dtype pred_w = exp(d_log_w) * w; const Dtype pred_h = exp(d_log_h) * h; // update upper-left corner location box[0] = pred_ctr_x - (Dtype)0.5 * pred_w; box[1] = pred_ctr_y - (Dtype)0.5 * pred_h; // update lower-right corner location box[2] = pred_ctr_x + (Dtype)0.5 * pred_w; box[3] = pred_ctr_y + (Dtype)0.5 * pred_h; // adjust new corner locations to be within the image region, box[0] = max((Dtype)0, min(box[0], img_W - (Dtype)1)); box[1] = max((Dtype)0, min(box[1], img_H - (Dtype)1)); box[2] = max((Dtype)0, min(box[2], img_W - (Dtype)1)); box[3] = max((Dtype)0, min(box[3], img_H - (Dtype)1)); // recompute new width & height const Dtype box_w = box[2] - box[0] + (Dtype)1; const Dtype box_h = box[3] - box[1] + (Dtype)1; // check if new box's size >= threshold return (box_w >= min_box_W) * (box_h >= min_box_H); } template <typename Dtype> static void sort_box(Dtype* list_cpu, const int start, const int end, const int num_top) { const Dtype pivot_score = list_cpu[start * 5 + 4]; int left = start + 1, right = end; Dtype temp[5]; while (left <= right) { while (left <= end && list_cpu[left * 5 + 4] >= pivot_score) ++left; while (right > start && list_cpu[right * 5 + 4] <= pivot_score) --right; if (left <= right) { for (int i = 0; i < 5; ++i) { temp[i] = list_cpu[left * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[left * 5 + i] = list_cpu[right * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[right * 5 + i] = temp[i]; } ++left; --right; } } if (right > start) { for (int i = 0; i < 5; ++i) { temp[i] = list_cpu[start * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[start * 5 + i] = list_cpu[right * 5 + i]; } for (int i = 0; i < 5; ++i) { list_cpu[right * 5 + i] = temp[i]; } } if (start < right - 1) { sort_box(list_cpu, start, right - 1, num_top); } if (right + 1 < num_top && right + 1 < end) { sort_box(list_cpu, right + 1, end, num_top); } } template <typename Dtype> __global__ static void enumerate_proposals_gpu(const int nthreads, const Dtype bottom4d[], const Dtype d_anchor4d[], const Dtype anchors[], Dtype proposals[], const int num_anchors, const int bottom_H, const int bottom_W, const Dtype img_H, const Dtype img_W, const Dtype min_box_H, const Dtype min_box_W, const int feat_stride) { CUDA_KERNEL_LOOP(index, nthreads) { const int h = index / num_anchors / bottom_W; const int w = (index / num_anchors) % bottom_W; const int k = index % num_anchors; const Dtype x = w * feat_stride; const Dtype y = h * feat_stride; const Dtype* p_box = d_anchor4d + h * bottom_W + w; const Dtype* p_score = bottom4d + h * bottom_W + w; const int bottom_area = bottom_H * bottom_W; const Dtype dx = p_box[(k * 4 + 0) * bottom_area]; const Dtype dy = p_box[(k * 4 + 1) * bottom_area]; const Dtype d_log_w = p_box[(k * 4 + 2) * bottom_area]; const Dtype d_log_h = 
p_box[(k * 4 + 3) * bottom_area]; Dtype* const p_proposal = proposals + index * 5; p_proposal[0] = x + anchors[k * 4 + 0]; p_proposal[1] = y + anchors[k * 4 + 1]; p_proposal[2] = x + anchors[k * 4 + 2]; p_proposal[3] = y + anchors[k * 4 + 3]; p_proposal[4] = transform_box(p_proposal, dx, dy, d_log_w, d_log_h, img_W, img_H, min_box_W, min_box_H) * p_score[k * bottom_area]; } } template <typename Dtype> __global__ static void retrieve_rois_gpu(const int nthreads, const int item_index, const Dtype proposals[], const int roi_indices[], Dtype rois[], Dtype roi_scores[]) { CUDA_KERNEL_LOOP(index, nthreads) { const Dtype* const proposals_index = proposals + roi_indices[index] * 5; rois[index * 5 + 0] = item_index; rois[index * 5 + 1] = proposals_index[0]; rois[index * 5 + 2] = proposals_index[1]; rois[index * 5 + 3] = proposals_index[2]; rois[index * 5 + 4] = proposals_index[3]; if (roi_scores) { roi_scores[index] = proposals_index[4]; } } } template <typename Dtype> __device__ static Dtype iou(const Dtype A[], const Dtype B[]) { // overlapped region (= box) const Dtype x1 = max(A[0], B[0]); const Dtype y1 = max(A[1], B[1]); const Dtype x2 = min(A[2], B[2]); const Dtype y2 = min(A[3], B[3]); // intersection area const Dtype width = max((Dtype)0, x2 - x1 + (Dtype)1); const Dtype height = max((Dtype)0, y2 - y1 + (Dtype)1); const Dtype area = width * height; // area of A, B const Dtype A_area = (A[2] - A[0] + (Dtype)1) * (A[3] - A[1] + (Dtype)1); const Dtype B_area = (B[2] - B[0] + (Dtype)1) * (B[3] - B[1] + (Dtype)1); // IoU return area / (A_area + B_area - area); } #define DIV_THEN_CEIL(x, y) (((x) + (y) - 1) / (y)) static const int nms_block_size = 64; template <typename Dtype> __global__ static void nms_mask(const Dtype boxes[], unsigned long long mask[], const int num_boxes, const Dtype nms_thresh) { // block region // j = j_start + { 0, ..., dj_end - 1 } // i = i_start + { 0, ..., di_end - 1 } const int i_start = blockIdx.x * nms_block_size; const int di_end = min(num_boxes - i_start, nms_block_size); const int j_start = blockIdx.y * nms_block_size; const int dj_end = min(num_boxes - j_start, nms_block_size); // copy all i-th boxes to GPU cache // i = i_start + { 0, ..., di_end - 1 } __shared__ Dtype boxes_i[nms_block_size * 4]; { const int di = threadIdx.x; if (di < di_end) { boxes_i[di * 4 + 0] = boxes[(i_start + di) * 5 + 0]; boxes_i[di * 4 + 1] = boxes[(i_start + di) * 5 + 1]; boxes_i[di * 4 + 2] = boxes[(i_start + di) * 5 + 2]; boxes_i[di * 4 + 3] = boxes[(i_start + di) * 5 + 3]; } } __syncthreads(); // given j = j_start + dj, // check whether box i is significantly overlapped with box j // (i.e., IoU(box j, box i) > threshold) // for all i = i_start + { 0, ..., di_end - 1 } except for i == j { const int dj = threadIdx.x; if (dj < dj_end) { // box j const Dtype* const box_j = boxes + (j_start + dj) * 5; // mask for significant overlap // if IoU(box j, box i) > threshold, di-th bit = 1 unsigned long long mask_j = 0; // check for all i = i_start + { 0, ..., di_end - 1 } // except for i == j const int di_start = (i_start == j_start) ? 
(dj + 1) : 0; for (int di = di_start; di < di_end; ++di) { // box i const Dtype* const box_i = boxes_i + di * 4; // if IoU(box j, box i) > threshold, di-th bit = 1 if (iou(box_j, box_i) > nms_thresh) { mask_j |= 1ULL << di; } } // mask: "num_boxes x num_blocks" array // for mask[j][bi], "di-th bit = 1" means: // box j is significantly overlapped with box i = i_start + di, // where i_start = bi * block_size { const int num_blocks = DIV_THEN_CEIL(num_boxes, nms_block_size); const int bi = blockIdx.x; mask[(j_start + dj) * num_blocks + bi] = mask_j; } } // endif dj < dj_end } } template <typename Dtype> void nms_gpu(const int num_boxes, const Dtype boxes_gpu[], Tensor* p_mask, int index_out_cpu[], int* const num_out, const int base_index, const Dtype nms_thresh, const int max_num_out) { const int num_blocks = DIV_THEN_CEIL(num_boxes, nms_block_size); { const dim3 blocks(num_blocks, num_blocks); vector<TIndex> mask_shape(2); mask_shape[0] = num_boxes; mask_shape[1] = num_blocks * sizeof(unsigned long long) / sizeof(int); p_mask->Reshape(mask_shape); // find all significantly-overlapped pairs of boxes nms_mask << <blocks, nms_block_size >> >( boxes_gpu, (unsigned long long*)p_mask->template mutable_data<int, CUDAContext>(), num_boxes, nms_thresh); CUDA_POST_KERNEL_CHECK; } // discard i-th box if it is significantly overlapped with // one or more previous (= scored higher) boxes { const unsigned long long* p_mask_cpu = (unsigned long long*)p_mask->mutable_data<int, CPUContext>(); int num_selected = 0; vector<unsigned long long> dead_bit(num_blocks); for (int i = 0; i < num_blocks; ++i) { dead_bit[i] = 0; } for (int i = 0; i < num_boxes; ++i) { const int nblock = i / nms_block_size; const int inblock = i % nms_block_size; if (!(dead_bit[nblock] & (1ULL << inblock))) { index_out_cpu[num_selected++] = base_index + i; const unsigned long long* const mask_i = p_mask_cpu + i * num_blocks; for (int j = nblock; j < num_blocks; ++j) { dead_bit[j] |= mask_i[j]; } if (num_selected == max_num_out) { break; } } } *num_out = num_selected; } } template void nms_gpu(const int num_boxes, const float boxes_gpu[], Tensor* p_mask, int index_out_cpu[], int* const num_out, const int base_index, const float nms_thresh, const int max_num_out); template void nms_gpu(const int num_boxes, const double boxes_gpu[], Tensor* p_mask, int index_out_cpu[], int* const num_out, const int base_index, const double nms_thresh, const int max_num_out); template <class Context> template <typename T> void ProposalOp<Context>::RunWithType() { auto* p_bottom_item = this->input(0).template data<T, CUDAContext>(); auto* p_d_anchor_item = this->input(1).template data<T, CUDAContext>(); auto* p_img_info_cpu = this->input(2).template data<T, CPUContext>(); auto* p_roi_item = this->output(0)->template mutable_data<T, CUDAContext>(); auto* p_score_item = (this->OutputSize() > 1) ? 
this->output(1)->template mutable_data<T, CUDAContext>() : NULL; vector<TIndex> proposals_shape(2), top_shape(2); proposals_shape[0] = 0; proposals_shape[1] = 5; top_shape[0] = 0; top_shape[1] = 5; for (int n = 0; n < this->input(0).dim(0); ++n) { // bottom shape: (2 x num_anchors) x H x W const int bottom_H = this->input(0).dim(2); const int bottom_W = this->input(0).dim(3); // input image height & width const T img_H = p_img_info_cpu[0]; const T img_W = p_img_info_cpu[1]; // scale factor for height & width const T scale_H = p_img_info_cpu[2]; const T scale_W = p_img_info_cpu[3]; // minimum box width & height const T min_box_H = min_size_ * scale_H; const T min_box_W = min_size_ * scale_W; // number of all proposals = num_anchors * H * W const int num_proposals = anchors_.dim(0) * bottom_H * bottom_W; // number of top-n proposals before NMS const int pre_nms_topn = std::min(num_proposals, pre_nms_topn_); // number of final RoIs int num_rois = 0; // enumerate all proposals // num_proposals = num_anchors * H * W // (x1, y1, x2, y2, score) for each proposal // NOTE: for bottom, only foreground scores are passed proposals_shape[0] = num_proposals; proposals_.Reshape(proposals_shape); enumerate_proposals_gpu<T> << <GET_BLOCKS(num_proposals), CUDA_NUM_THREADS >> >(num_proposals, p_bottom_item + num_proposals, p_d_anchor_item, anchors_.template data<T, CUDAContext>(), proposals_.template mutable_data<T, CUDAContext>(), anchors_.dim(0), bottom_H, bottom_W, img_H, img_W, min_box_H, min_box_W, feat_stride_); CUDA_POST_KERNEL_CHECK; sort_box<T>(proposals_.template mutable_data<T, CPUContext>(), 0, num_proposals - 1, pre_nms_topn_); nms_gpu<T>(pre_nms_topn, proposals_.template data<T, CUDAContext>(), &nms_mask_, roi_indices_.template mutable_data<int, CPUContext>(), &num_rois, 0, nms_thresh_, post_nms_topn_); retrieve_rois_gpu<T> << <GET_BLOCKS(num_rois), CUDA_NUM_THREADS >> >(num_rois, n, proposals_.template data<T, CUDAContext>(), roi_indices_.template data<int, CUDAContext>(), p_roi_item, p_score_item); CUDA_POST_KERNEL_CHECK; top_shape[0] += num_rois; } this->output(0)->Reshape(top_shape); if (this->OutputSize() > 1) { top_shape.pop_back(); this->output(1)->Reshape(top_shape); } } template void ProposalOp<CUDAContext>::RunWithType<float>(); }
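The NMS path above encodes pairwise overlaps in a num_boxes x num_blocks bitmask (64 boxes per mask word) on the GPU, then does a serial greedy pass on the host with the dead_bit vector. The indexing is easy to misread, so here is a compact host-side walk over that layout (an illustrative helper with hypothetical names, equivalent in effect to the loop inside nms_gpu):

#include <cstddef>
#include <vector>

// Illustrative only: bit di of mask[j * num_blocks + bi] means
// "IoU(box j, box i) > threshold" for i = bi * 64 + di.
// Boxes are assumed pre-sorted by descending score, as in ProposalOp.
static int greedy_select_sketch(const unsigned long long *mask, int num_boxes,
                                int *keep /* out, capacity num_boxes */)
{
    const int block = 64;                                      // boxes per mask word
    const int num_blocks = (num_boxes + block - 1) / block;    // DIV_THEN_CEIL
    std::vector<unsigned long long> dead(num_blocks, 0ULL);
    int num_kept = 0;
    for (int i = 0; i < num_boxes; ++i) {
        if (dead[i / block] & (1ULL << (i % block))) continue;  // already suppressed
        keep[num_kept++] = i;                                   // keep the highest-scoring survivor
        const unsigned long long *row = mask + (std::size_t)i * num_blocks;
        for (int b = 0; b < num_blocks; ++b) dead[b] |= row[b]; // suppress everything i overlaps
    }
    return num_kept;
}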
e6ed386f1fc4d6a029e671882b593629808e5967.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <string> #include "canny.h" #include "image_prep.h" #include "clock.h" // TODO: optimize multiplications! // TODO: worry about memory locality later // TODO: signed or unsigned? chars or shorts? // TODO: try separable filters hipError_t err = hipSuccess; dim3 dimGrid, dimBlock; bool doSync = true; // performs a gaussian blur on an image __host__ void blur(float blurSize, byte *dImg, byte *dImgOut) { float *hFlt; unsigned fltSize; clock_t *t; gaussian_filter(blurSize, &hFlt, &fltSize); setFilter(hFlt, fltSize*fltSize*sizeof(float)); // allocate and copy filter to device // CUDAERR(hipMalloc((void **) &dFlt, fltSize*fltSize*sizeof(float)), // "allocating dFlt"); // CUDAERR(hipMemcpy(dFlt, hFlt, fltSize*fltSize*sizeof(float), // hipMemcpyHostToDevice), "copying hFlt to dFlt"); // blur image t = clock_start(); hipLaunchKernelGGL(( conv2d), dim3(dimGrid), dim3(dimBlock), 0, 0, dImg, dImgOut, height, width, fltSize, fltSize); if (doSync) { CUDAERR(hipDeviceSynchronize(), "hipDeviceSynchronize()"); clock_lap(t, CLK_BLUR); } // cleanup free(hFlt); // CUDAERR(hipFree(dFlt), "freeing dFlt"); } // performs a separable gaussian blur on an image (also using shm) __host__ void blur_sep(float blurSize, byte *dImg, byte *dImgOut) { float *hFlt; unsigned fltSize, as; clock_t *t; gaussian_filter_1d(blurSize, &hFlt, &fltSize); setFilter(hFlt, fltSize*sizeof(float)); as = fltSize/2; std::cout << "Blur filter size: " << fltSize << std::endl; dim3 dimGrid2 = dim3(ceil(width*1./(lbs-2*as)), ceil(height*1./sbs), 1); dim3 dimBlock2 = dim3(lbs, sbs, 1); dim3 dimGrid3 = dim3(ceil(width*1./sbs), ceil(height*1./(lbs-2*as)), 1); dim3 dimBlock3 = dim3(sbs, lbs, 1); // blur image t = clock_start(); hipLaunchKernelGGL(( conv1dRows), dim3(dimGrid2), dim3(dimBlock2), 0, 0, dImg, dImgOut, height, width, fltSize); if (doSync) { CUDAERR(hipDeviceSynchronize(), "hipDeviceSynchronize()"); } hipLaunchKernelGGL(( conv1dCols), dim3(dimGrid3), dim3(dimBlock3), 0, 0, dImgOut, dImg, height, width, fltSize); if (doSync) { CUDAERR(hipDeviceSynchronize(), "hipDeviceSynchronize()"); clock_lap(t, CLK_BLUR); } // TODO: remove this CUDAERR(hipMemcpy(dImgOut, dImg, width*height, hipMemcpyDeviceToDevice), "TESTING"); // cleanup free(hFlt); } // basic sobel kernel // out is the magnitude of the gradient // out2 is the angle of the gradient __global__ void sobel(byte *img, byte *out, byte *out2, int h, int w) { int vKer, hKer, y, x; y = blockDim.y*blockIdx.y + threadIdx.y; x = blockDim.x*blockIdx.x + threadIdx.x; // make sure not on edge if (y <= 0 || y >= h-1 || x <= 0 || x >= w-1) { return; } vKer = img[(y-1)*w+(x-1)]*1 + img[(y-1)*w+x]*2 + img[(y-1)*w+(x+1)]*1 + img[(y+1)*w+(x-1)]*-1 + img[(y+1)*w+x]*-2 + img[(y+1)*w+(x+1)]*-1; hKer = img[(y-1)*w+(x-1)]*1 + img[(y-1)*w+(x+1)]*-1 + img[y*w+(x-1)]*2 + img[y*w+(x+1)]*-2 + img[(y+1)*w+(x-1)]*1 + img[(y+1)*w+(x+1)]*-1; out[y*w+x] = out[y*w+x] = sqrtf(hKer*hKer + vKer*vKer); out2[y*w+x] = (byte)((atan2f(vKer,hKer)+9/8*M_PI)*4/M_PI)&0x3; } // shared memory sobel filter __global__ void sobel_shm(byte *img, byte *out, byte *out2, int h, int w) { int y, x; int vKer, hKer; __shared__ int tmp[bs*bs]; y = (bs-2)*blockIdx.y + threadIdx.y-1; x = (bs-2)*blockIdx.x + threadIdx.x-1; // load data from image if (y>=0 && y<h && x>=0 && x<w) { tmp[ty*bs+tx] = img[y*w+x]; } __syncthreads(); // convolution and write-back if (ty>=1 && ty<bs-1 && tx>=1 && tx<bs-1 && y<h && x<w) { vKer = tmp[(ty-1)*bs+(tx-1)]*1 + 
tmp[(ty-1)*bs+tx]*2 + tmp[(ty-1)*bs+(tx+1)]*1 + tmp[(ty+1)*bs+(tx-1)]*-1 + tmp[(ty+1)*bs+tx]*-2 + tmp[(ty+1)*bs+(tx+1)]*-1; hKer = tmp[(ty-1)*bs+(tx-1)]*1 + tmp[(ty-1)*bs+(tx+1)]*-1 + tmp[ty*bs+(tx-1)]*2 + tmp[ty*bs+(tx+1)]*-2 + tmp[(ty+1)*bs+(tx-1)]*1 + tmp[(ty+1)*bs+(tx+1)]*-1; out[y*w+x] = sqrtf(hKer*hKer + vKer*vKer); out2[y*w+x] = (byte)((atan2f(vKer,hKer)+9/8*M_PI)*4/M_PI)&0x3; } } // separable (and shared memory) sobel filter __global__ void sobel_sep(byte *img, byte *out, byte *out2, int h, int w) { int y, x; // using int instead of byte for the following offers a 0.01s (5%) // speedup on the 16k image -- coalesced memory? int vKer, hKer; __shared__ int tmp1[bs*bs], tmp2[bs*bs], tmp3[bs*bs]; y = (bs-2)*blockIdx.y + threadIdx.y-1; x = (bs-2)*blockIdx.x + threadIdx.x-1; // load data from image if (y>=0 && y<h && x>=0 && x<w) { tmp1[ty*bs+tx] = img[y*w+x]; } __syncthreads(); // first convolution if (ty>=1 && ty<bs-1 && tx && tx<bs) { tmp2[ty*bs+tx] = tmp1[(ty-1)*bs+tx] + (tmp1[ty*bs+tx]<<1) + tmp1[(ty+1)*bs+tx]; } if (ty && ty<bs && tx>=1 && tx<bs-1) { tmp3[ty*bs+tx] = tmp1[ty*bs+(tx-1)] + (tmp1[ty*bs+tx]<<1) + tmp1[ty*bs+(tx+1)]; } __syncthreads(); // second convolution and write-back if (ty>=1 && ty<bs-1 && tx>=1 && tx<bs-1 && y<h && x<w) { hKer = tmp2[ty*bs+(tx-1)] - tmp2[ty*bs+(tx+1)]; vKer = tmp3[(ty-1)*bs+tx] - tmp3[(ty+1)*bs+tx]; out[y*w+x] = sqrtf(hKer*hKer + vKer*vKer); out2[y*w+x] = (byte)((atan2f(vKer,hKer)+9/8*M_PI)*4/M_PI)&0x3; } } // perform edge thinning __global__ void edge_thin(byte *mag, byte *angle, byte *out, int h, int w) { int y, x, y1, x1, y2, x2; y = blockDim.y*blockIdx.y + threadIdx.y; x = blockDim.x*blockIdx.x + threadIdx.x; // make sure not on the border if (y <= 0 || y >= h-1 || x <= 0 || x >= w-1) { return; } // if not greater than angles in both directions, then zero switch (angle[y*w + x]) { case 0: // horizontal y1 = y2 = y; x1 = x-1; x2 = x+1; break; case 3: // 135 y1 = y-1; x1 = x+1; y2 = y+1; x2 = x-1; break; case 2: // vertical x1 = x2 = x; y1 = y-1; y2 = y+1; break; case 1: // 45 y1 = y-1; x1 = x-1; y2 = y+1; x2 = x+1; } if (mag[y1*w + x1] >= mag[y*w + x] || mag[y2*w + x2] >= mag[y*w + x]) { out[y*w + x] = 0; } else { out[y*w + x] = mag[y*w + x]; } } // definitions for the below two functions #define MSK_LOW 0x0 // below threshold 1 #define MSK_THR 0x60 // at threshold 1 #define MSK_NEW 0x90 // at threshold 2, newly discovered #define MSK_DEF 0xff // at threshold 2 and already discovered // perform double thresholding __global__ void edge_thin(byte *dImg, byte *out, int h, int w, byte t1, byte t2) { int y, x, ind, grad; y = blockDim.y*blockIdx.y + threadIdx.y; x = blockDim.x*blockIdx.x + threadIdx.x; if (y >= h || x >= w) { return; } ind = y*w + x; grad = dImg[ind]; if (grad < t1) { out[ind] = MSK_LOW; } else if (grad < t2) { out[ind] = MSK_THR; } else { out[ind] = MSK_NEW; } } // check and set neighbor #define CAS(buf, cond, x2, y2, width) \ if ((cond) && buf[(y2)*(width)+(x2)] == MSK_THR) { \ buf[(y2)*(width)+(x2)] = MSK_NEW; \ } // perform one iteration of hysteresis __global__ void hysteresis(byte *dImg, int h, int w, bool final) { int y, x; __shared__ byte changes; // infer y, x, from block/thread index y = blockDim.y * blockIdx.y + threadIdx.y; x = blockDim.x * blockIdx.x + threadIdx.x; // check if pixel is connected to its neighbors; continue until // no changes remaining do { __syncthreads(); changes = 0; __syncthreads(); // make sure inside bounds -- need this here b/c we can't have // __syncthreads() cause a branch divergence in a warp; 
// see https://stackoverflow.com/a/6667067/2397327 // if newly-discovered edge, then check its neighbors if ((x<w && y<h) && dImg[y*w+x] == MSK_NEW) { // promote to definitely discovered dImg[y*w+x] = MSK_DEF; changes = 1; // check neighbors CAS(dImg, x>0&&y>0, x-1, y-1, w); CAS(dImg, y>0, x, y-1, w); CAS(dImg, x<w-1&&y>0, x+1, y-1, w); CAS(dImg, x<w-1, x+1, y, w); CAS(dImg, x<w-1&&y<h-1, x+1, y+1, w); CAS(dImg, y<h-1, x, y+1, w); CAS(dImg, x>0&&y<h-1, x-1, y+1, w); CAS(dImg, x>0, x-1, y, w); } __syncthreads(); } while (changes); // set all threshold1 values to 0 if (final && (x<w && y<h) && dImg[y*w+x] != MSK_DEF) { dImg[y*w+x] = 0; } } // shared memory version of hysteresis __global__ void hysteresis_shm(byte *dImg, int h, int w, bool final) { int y, x; bool in_bounds; __shared__ byte changes, tmp[bs*bs]; // infer y, x, from block/thread index y = (bs-2)*blockIdx.y + ty-1; x = (bs-2)*blockIdx.x + tx-1; in_bounds = (x<w && y<h) && (tx>=1 && tx<bs-1 && ty>=1 && ty<bs-1); if (y>=0 && y<h && x>=0 && x<w) { tmp[ty*bs+tx] = dImg[y*w+x]; } __syncthreads(); // check if pixel is connected to its neighbors; continue until // no changes remaining do { __syncthreads(); changes = 0; __syncthreads(); // make sure inside bounds -- need this here b/c we can't have // __syncthreads() cause a branch divergence in a warp; // see https://stackoverflow.com/a/6667067/2397327 // if newly-discovered edge, then check its neighbors if (in_bounds && tmp[ty*bs+tx] == MSK_NEW) { // promote to definitely discovered tmp[ty*bs+tx] = MSK_DEF; changes = 1; // check neighbors CAS(tmp, 1, tx-1, ty-1, bs); CAS(tmp, 1, tx, ty-1, bs); CAS(tmp, x<w-1, tx+1, ty-1, bs); CAS(tmp, x<w-1, tx+1, ty, bs); CAS(tmp, x<w-1&&y<h-1, tx+1, ty+1, bs); CAS(tmp, y<h-1, tx, ty+1, bs); CAS(tmp, y<h-1, tx-1, ty+1, bs); CAS(tmp, 1, tx-1, ty, bs); } __syncthreads(); } while (changes); if (y>=0 && y<h && x>=0 && x<w) { if (final) { if (in_bounds) { dImg[y*w+x] = MSK_DEF*(tmp[ty*bs+tx]==MSK_DEF); } } else { dImg[y*w+x] = max(dImg[y*w+x], tmp[ty*bs+tx]); } } } // perform canny edge detection __host__ void canny(byte *dImg, byte *dImgOut, float blurStd, float threshold1, float threshold2, int hystIters) { byte *dImgTmp; clock_t *t; int i; CUDAERR(hipMalloc((void**)&dImgTmp, width*height), "alloc dImgTmp"); // blur(blurStd, dImg, dImgOut); blur_sep(blurStd, dImg, dImgOut); // different grid with 1-width apron for shared-memory schemes dim3 dimGrid2 = dim3(ceil(width*1./(bs-2)), ceil(height*1./(bs-2)), 1); dim3 dimBlock2 = dim3(bs, bs, 1); t = clock_start(); std::cout << "Performing Sobel filter..." << std::endl; // sobel<<<dimGrid, dimBlock>>>(dImgOut, dImg, dImgTmp, // height, width); hipLaunchKernelGGL(( sobel_shm), dim3(dimGrid2), dim3(dimBlock2), 0, 0, dImgOut, dImg, dImgTmp, height, width); // sobel_sep<<<dimGrid2, dimBlock2>>>(dImgOut, dImg, dImgTmp, // height, width); CUDAERR(hipGetLastError(), "launch sobel kernel"); if (doSync) { CUDAERR(hipDeviceSynchronize(), "hipDeviceSynchronize()"); clock_lap(t, CLK_SOBEL); } std::cout << "Performing edge thinning..." << std::endl; hipLaunchKernelGGL(( edge_thin), dim3(dimGrid), dim3(dimBlock), 0, 0, dImg, dImgTmp, dImgOut, height, width); CUDAERR(hipGetLastError(), "launch edge thinning kernel"); if (doSync) { CUDAERR(hipDeviceSynchronize(), "hipDeviceSynchronize()"); clock_lap(t, CLK_THIN); } std::cout << "Performing double thresholding..." 
<< std::endl; hipLaunchKernelGGL(( edge_thin), dim3(dimGrid), dim3(dimBlock), 0, 0, dImgOut, dImgTmp, height, width, 255*threshold1, 255*threshold2); CUDAERR(hipGetLastError(), "launch double thresholding kernel"); if (doSync) { CUDAERR(hipDeviceSynchronize(), "hipDeviceSynchronize()"); clock_lap(t, CLK_THRES); } if (hystIters) { std::cout << "Performing hysteresis..." << std::endl; for (i = 0; i < hystIters; ++i) { // hysteresis<<<dimGrid, dimBlock>>>(dImgTmp, // height, width, i==hyst_iters-1); hipLaunchKernelGGL(( hysteresis_shm), dim3(dimGrid2), dim3(dimBlock2), 0, 0, dImgTmp, height, width, i==hystIters-1); CUDAERR(hipGetLastError(), "launch hysteresis kernel"); if (doSync) { CUDAERR(hipDeviceSynchronize(), "hipDeviceSynchronize()"); clock_lap(t, CLK_HYST); } } } // TODO: remove this CUDAERR(hipMemcpy(dImgOut, dImgTmp, width*height, hipMemcpyDeviceToDevice), "TESTING"); // dTmp = dImg; // dImg = dImgOut; // dImgOut = dTmp; CUDAERR(hipFree(dImgTmp), "freeing dImgTmp"); } // print timings __host__ void print_timings(void) { std::cout << "overall:\t" << clock_ave[CLK_ALL] << "s" << std::endl; // doSync off means only overall time counted if (!doSync) { return; } std::cout << "grayscale:\t" << clock_ave[CLK_GRAY] << "s" << std::endl << "blur:\t\t" << clock_ave[CLK_BLUR] << "s" << std::endl << "sobel\t\t" << clock_ave[CLK_SOBEL] << "s" << std::endl << "edgethin:\t" << clock_ave[CLK_THIN] << "s" << std::endl << "threshold:\t" << clock_ave[CLK_THRES] << "s" << std::endl << "hysteresis:\t" << clock_ave[CLK_HYST] << "s" << std::endl << "hyst total:\t" << clock_total[CLK_HYST] << "s" << std::endl; } __host__ int main(void) { std::string inFile, outFile; unsigned i, channels, rowStride, hystIters; byte *hImg, *dImg, *dImgMono, *dImgMonoOut; float blurStd, threshold1, threshold2; clock_t *tGray, *tOverall; // get image name std::cout << "Enter infile (without .png): "; std::cin >> inFile; std::cout << "Enter outfile (without .png): "; std::cin >> outFile; std::cout << "Blur stdev: "; std::cin >> blurStd; std::cout << "Threshold 1: "; std::cin >> threshold1; std::cout << "Threshold 2: "; std::cin >> threshold2; std::cout << "Hysteresis iters: "; std::cin >> hystIters; std::cout << "Sync after each kernel? "; std::cin >> doSync; inFile += ".png"; outFile += "_bs" + std::to_string(blurStd) + "_th" + std::to_string(threshold1) + "_th" + std::to_string(threshold2) + (hystIters ? "" : "_nohyst") + ".png"; // get image std::cout << "Reading image from file..." << std::endl; read_png_file(inFile.c_str()); channels = color_type==PNG_COLOR_TYPE_RGBA ? 4 : 3; rowStride = width*channels; std::cout << "Channels: " << channels << std::endl; // allocate memory std::cout << "Allocating host and device buffers..." << std::endl; hImg = (byte *)malloc(width*height*channels); CUDAERR(hipMalloc((void **)&dImg, width*height*channels), "hipMalloc dImg"); CUDAERR(hipMalloc((void **)&dImgMono, width*height), "hipMalloc dImgMono"); CUDAERR(hipMalloc((void **)&dImgMonoOut, width*height), "hipMalloc dImgMonoOut"); // copy image from row-pointers to device for (i = 0; i < height; ++i) { memcpy(hImg + i*rowStride, row_pointers[i], rowStride); } // copy image to device std::cout << "Copying image to device..." 
<< std::endl; CUDAERR(hipMemcpy(dImg, hImg, width*height*channels, hipMemcpyHostToDevice), "hipMemcpy to device"); // set kernel parameters (same for all future kernel invocations) // TODO: calculate best grid/block dim depending on the device dimGrid = dim3(ceil(rowStride*1./bs), ceil(height*1./bs), 1); dimBlock = dim3(bs, bs, 1); // convert to grayscale hipDeviceSynchronize(); tOverall = clock_start(); tGray = clock_start(); std::cout << "Converting to grayscale..." << std::endl; hipLaunchKernelGGL(( toGrayScale), dim3(dimGrid), dim3(dimBlock), 0, 0, dImg, dImgMono, height, width, channels); CUDAERR(hipGetLastError(), "launch toGrayScale kernel"); if (doSync) { hipDeviceSynchronize(); clock_lap(tGray, CLK_GRAY); } // canny edge detection std::cout << "Performing canny edge-detection..." << std::endl; canny(dImgMono, dImgMonoOut, blurStd, threshold1, threshold2, hystIters); // convert back from grayscale tGray = clock_start(); std::cout << "Convert image back to multi-channel..." << std::endl; hipLaunchKernelGGL(( fromGrayScale), dim3(dimGrid), dim3(dimBlock), 0, 0, dImgMonoOut, dImg, height, width, channels); CUDAERR(hipGetLastError(), "launch fromGrayScale kernel"); hipDeviceSynchronize(); if (doSync) { clock_lap(tGray, CLK_GRAY); } clock_lap(tOverall, CLK_ALL); // copy image back to host std::cout << "Copy image back to host..." << std::endl; CUDAERR(hipMemcpy(hImg, dImg, width*height*channels, hipMemcpyDeviceToHost), "hipMemcpy to host"); // copy image back to row_pointers std::cout << "Copy image back to row_pointers..." << std::endl; for (i = 0; i < height; ++i) { memcpy(row_pointers[i], hImg + i*rowStride, rowStride); } print_timings(); // copy image back from device std::cout << "Writing image back to file..." << std::endl; write_png_file(outFile.c_str()); // freeing pointers std::cout << "Freeing device memory..." << std::endl; CUDAERR(hipFree(dImg), "freeing dImg"); CUDAERR(hipFree(dImgMono), "freeing dImgMono"); CUDAERR(hipFree(dImgMonoOut), "freeing dImgMonoOut"); std::cout << "Done." << std::endl; }
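The .hip file above and the .cu file that follows are the same Canny edge-detection source before and after hipify; most of the translation is mechanical, and the most visible change is the kernel-launch syntax. A minimal sketch of that mapping, using a hypothetical scale kernel rather than any kernel from these files:

#include <cstdio>
#include <cuda_runtime.h>

// Trivial kernel used only to illustrate the launch syntax.
__global__ void scale(float *data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

int main() {
    const int n = 1024;
    float *d_data = nullptr;
    cudaMalloc(&d_data, n * sizeof(float));

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    // CUDA form: kernel<<<grid, block, sharedMemBytes, stream>>>(args...)
    scale<<<grid, block, 0, 0>>>(d_data, 2.0f, n);

    // hipify rewrites the same launch as
    //   hipLaunchKernelGGL(scale, grid, block, 0, 0, d_data, 2.0f, n);
    // i.e. kernel, grid, block, dynamic shared memory, stream, then the
    // arguments -- the pattern visible throughout the .hip files in this corpus.

    cudaDeviceSynchronize();
    cudaFree(d_data);
    return 0;
}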
e6ed386f1fc4d6a029e671882b593629808e5967.cu
#include <string> #include "canny.h" #include "image_prep.h" #include "clock.h" // TODO: optimize multiplications! // TODO: worry about memory locality later // TODO: signed or unsigned? chars or shorts? // TODO: try separable filters cudaError_t err = cudaSuccess; dim3 dimGrid, dimBlock; bool doSync = true; // performs a gaussian blur on an image __host__ void blur(float blurSize, byte *dImg, byte *dImgOut) { float *hFlt; unsigned fltSize; clock_t *t; gaussian_filter(blurSize, &hFlt, &fltSize); setFilter(hFlt, fltSize*fltSize*sizeof(float)); // allocate and copy filter to device // CUDAERR(cudaMalloc((void **) &dFlt, fltSize*fltSize*sizeof(float)), // "allocating dFlt"); // CUDAERR(cudaMemcpy(dFlt, hFlt, fltSize*fltSize*sizeof(float), // cudaMemcpyHostToDevice), "copying hFlt to dFlt"); // blur image t = clock_start(); conv2d<<<dimGrid, dimBlock>>>(dImg, dImgOut, height, width, fltSize, fltSize); if (doSync) { CUDAERR(cudaDeviceSynchronize(), "cudaDeviceSynchronize()"); clock_lap(t, CLK_BLUR); } // cleanup free(hFlt); // CUDAERR(cudaFree(dFlt), "freeing dFlt"); } // performs a separable gaussian blur on an image (also using shm) __host__ void blur_sep(float blurSize, byte *dImg, byte *dImgOut) { float *hFlt; unsigned fltSize, as; clock_t *t; gaussian_filter_1d(blurSize, &hFlt, &fltSize); setFilter(hFlt, fltSize*sizeof(float)); as = fltSize/2; std::cout << "Blur filter size: " << fltSize << std::endl; dim3 dimGrid2 = dim3(ceil(width*1./(lbs-2*as)), ceil(height*1./sbs), 1); dim3 dimBlock2 = dim3(lbs, sbs, 1); dim3 dimGrid3 = dim3(ceil(width*1./sbs), ceil(height*1./(lbs-2*as)), 1); dim3 dimBlock3 = dim3(sbs, lbs, 1); // blur image t = clock_start(); conv1dRows<<<dimGrid2, dimBlock2>>>(dImg, dImgOut, height, width, fltSize); if (doSync) { CUDAERR(cudaDeviceSynchronize(), "cudaDeviceSynchronize()"); } conv1dCols<<<dimGrid3, dimBlock3>>>(dImgOut, dImg, height, width, fltSize); if (doSync) { CUDAERR(cudaDeviceSynchronize(), "cudaDeviceSynchronize()"); clock_lap(t, CLK_BLUR); } // TODO: remove this CUDAERR(cudaMemcpy(dImgOut, dImg, width*height, cudaMemcpyDeviceToDevice), "TESTING"); // cleanup free(hFlt); } // basic sobel kernel // out is the magnitude of the gradient // out2 is the angle of the gradient __global__ void sobel(byte *img, byte *out, byte *out2, int h, int w) { int vKer, hKer, y, x; y = blockDim.y*blockIdx.y + threadIdx.y; x = blockDim.x*blockIdx.x + threadIdx.x; // make sure not on edge if (y <= 0 || y >= h-1 || x <= 0 || x >= w-1) { return; } vKer = img[(y-1)*w+(x-1)]*1 + img[(y-1)*w+x]*2 + img[(y-1)*w+(x+1)]*1 + img[(y+1)*w+(x-1)]*-1 + img[(y+1)*w+x]*-2 + img[(y+1)*w+(x+1)]*-1; hKer = img[(y-1)*w+(x-1)]*1 + img[(y-1)*w+(x+1)]*-1 + img[y*w+(x-1)]*2 + img[y*w+(x+1)]*-2 + img[(y+1)*w+(x-1)]*1 + img[(y+1)*w+(x+1)]*-1; out[y*w+x] = out[y*w+x] = sqrtf(hKer*hKer + vKer*vKer); out2[y*w+x] = (byte)((atan2f(vKer,hKer)+9/8*M_PI)*4/M_PI)&0x3; } // shared memory sobel filter __global__ void sobel_shm(byte *img, byte *out, byte *out2, int h, int w) { int y, x; int vKer, hKer; __shared__ int tmp[bs*bs]; y = (bs-2)*blockIdx.y + threadIdx.y-1; x = (bs-2)*blockIdx.x + threadIdx.x-1; // load data from image if (y>=0 && y<h && x>=0 && x<w) { tmp[ty*bs+tx] = img[y*w+x]; } __syncthreads(); // convolution and write-back if (ty>=1 && ty<bs-1 && tx>=1 && tx<bs-1 && y<h && x<w) { vKer = tmp[(ty-1)*bs+(tx-1)]*1 + tmp[(ty-1)*bs+tx]*2 + tmp[(ty-1)*bs+(tx+1)]*1 + tmp[(ty+1)*bs+(tx-1)]*-1 + tmp[(ty+1)*bs+tx]*-2 + tmp[(ty+1)*bs+(tx+1)]*-1; hKer = tmp[(ty-1)*bs+(tx-1)]*1 + tmp[(ty-1)*bs+(tx+1)]*-1 + 
tmp[ty*bs+(tx-1)]*2 + tmp[ty*bs+(tx+1)]*-2 + tmp[(ty+1)*bs+(tx-1)]*1 + tmp[(ty+1)*bs+(tx+1)]*-1; out[y*w+x] = sqrtf(hKer*hKer + vKer*vKer); out2[y*w+x] = (byte)((atan2f(vKer,hKer)+9/8*M_PI)*4/M_PI)&0x3; } } // separable (and shared memory) sobel filter __global__ void sobel_sep(byte *img, byte *out, byte *out2, int h, int w) { int y, x; // using int instead of byte for the following offers a 0.01s (5%) // speedup on the 16k image -- coalesced memory? int vKer, hKer; __shared__ int tmp1[bs*bs], tmp2[bs*bs], tmp3[bs*bs]; y = (bs-2)*blockIdx.y + threadIdx.y-1; x = (bs-2)*blockIdx.x + threadIdx.x-1; // load data from image if (y>=0 && y<h && x>=0 && x<w) { tmp1[ty*bs+tx] = img[y*w+x]; } __syncthreads(); // first convolution if (ty>=1 && ty<bs-1 && tx && tx<bs) { tmp2[ty*bs+tx] = tmp1[(ty-1)*bs+tx] + (tmp1[ty*bs+tx]<<1) + tmp1[(ty+1)*bs+tx]; } if (ty && ty<bs && tx>=1 && tx<bs-1) { tmp3[ty*bs+tx] = tmp1[ty*bs+(tx-1)] + (tmp1[ty*bs+tx]<<1) + tmp1[ty*bs+(tx+1)]; } __syncthreads(); // second convolution and write-back if (ty>=1 && ty<bs-1 && tx>=1 && tx<bs-1 && y<h && x<w) { hKer = tmp2[ty*bs+(tx-1)] - tmp2[ty*bs+(tx+1)]; vKer = tmp3[(ty-1)*bs+tx] - tmp3[(ty+1)*bs+tx]; out[y*w+x] = sqrtf(hKer*hKer + vKer*vKer); out2[y*w+x] = (byte)((atan2f(vKer,hKer)+9/8*M_PI)*4/M_PI)&0x3; } } // perform edge thinning __global__ void edge_thin(byte *mag, byte *angle, byte *out, int h, int w) { int y, x, y1, x1, y2, x2; y = blockDim.y*blockIdx.y + threadIdx.y; x = blockDim.x*blockIdx.x + threadIdx.x; // make sure not on the border if (y <= 0 || y >= h-1 || x <= 0 || x >= w-1) { return; } // if not greater than angles in both directions, then zero switch (angle[y*w + x]) { case 0: // horizontal y1 = y2 = y; x1 = x-1; x2 = x+1; break; case 3: // 135 y1 = y-1; x1 = x+1; y2 = y+1; x2 = x-1; break; case 2: // vertical x1 = x2 = x; y1 = y-1; y2 = y+1; break; case 1: // 45 y1 = y-1; x1 = x-1; y2 = y+1; x2 = x+1; } if (mag[y1*w + x1] >= mag[y*w + x] || mag[y2*w + x2] >= mag[y*w + x]) { out[y*w + x] = 0; } else { out[y*w + x] = mag[y*w + x]; } } // definitions for the below two functions #define MSK_LOW 0x0 // below threshold 1 #define MSK_THR 0x60 // at threshold 1 #define MSK_NEW 0x90 // at threshold 2, newly discovered #define MSK_DEF 0xff // at threshold 2 and already discovered // perform double thresholding __global__ void edge_thin(byte *dImg, byte *out, int h, int w, byte t1, byte t2) { int y, x, ind, grad; y = blockDim.y*blockIdx.y + threadIdx.y; x = blockDim.x*blockIdx.x + threadIdx.x; if (y >= h || x >= w) { return; } ind = y*w + x; grad = dImg[ind]; if (grad < t1) { out[ind] = MSK_LOW; } else if (grad < t2) { out[ind] = MSK_THR; } else { out[ind] = MSK_NEW; } } // check and set neighbor #define CAS(buf, cond, x2, y2, width) \ if ((cond) && buf[(y2)*(width)+(x2)] == MSK_THR) { \ buf[(y2)*(width)+(x2)] = MSK_NEW; \ } // perform one iteration of hysteresis __global__ void hysteresis(byte *dImg, int h, int w, bool final) { int y, x; __shared__ byte changes; // infer y, x, from block/thread index y = blockDim.y * blockIdx.y + threadIdx.y; x = blockDim.x * blockIdx.x + threadIdx.x; // check if pixel is connected to its neighbors; continue until // no changes remaining do { __syncthreads(); changes = 0; __syncthreads(); // make sure inside bounds -- need this here b/c we can't have // __syncthreads() cause a branch divergence in a warp; // see https://stackoverflow.com/a/6667067/2397327 // if newly-discovered edge, then check its neighbors if ((x<w && y<h) && dImg[y*w+x] == MSK_NEW) { // promote to definitely 
discovered dImg[y*w+x] = MSK_DEF; changes = 1; // check neighbors CAS(dImg, x>0&&y>0, x-1, y-1, w); CAS(dImg, y>0, x, y-1, w); CAS(dImg, x<w-1&&y>0, x+1, y-1, w); CAS(dImg, x<w-1, x+1, y, w); CAS(dImg, x<w-1&&y<h-1, x+1, y+1, w); CAS(dImg, y<h-1, x, y+1, w); CAS(dImg, x>0&&y<h-1, x-1, y+1, w); CAS(dImg, x>0, x-1, y, w); } __syncthreads(); } while (changes); // set all threshold1 values to 0 if (final && (x<w && y<h) && dImg[y*w+x] != MSK_DEF) { dImg[y*w+x] = 0; } } // shared memory version of hysteresis __global__ void hysteresis_shm(byte *dImg, int h, int w, bool final) { int y, x; bool in_bounds; __shared__ byte changes, tmp[bs*bs]; // infer y, x, from block/thread index y = (bs-2)*blockIdx.y + ty-1; x = (bs-2)*blockIdx.x + tx-1; in_bounds = (x<w && y<h) && (tx>=1 && tx<bs-1 && ty>=1 && ty<bs-1); if (y>=0 && y<h && x>=0 && x<w) { tmp[ty*bs+tx] = dImg[y*w+x]; } __syncthreads(); // check if pixel is connected to its neighbors; continue until // no changes remaining do { __syncthreads(); changes = 0; __syncthreads(); // make sure inside bounds -- need this here b/c we can't have // __syncthreads() cause a branch divergence in a warp; // see https://stackoverflow.com/a/6667067/2397327 // if newly-discovered edge, then check its neighbors if (in_bounds && tmp[ty*bs+tx] == MSK_NEW) { // promote to definitely discovered tmp[ty*bs+tx] = MSK_DEF; changes = 1; // check neighbors CAS(tmp, 1, tx-1, ty-1, bs); CAS(tmp, 1, tx, ty-1, bs); CAS(tmp, x<w-1, tx+1, ty-1, bs); CAS(tmp, x<w-1, tx+1, ty, bs); CAS(tmp, x<w-1&&y<h-1, tx+1, ty+1, bs); CAS(tmp, y<h-1, tx, ty+1, bs); CAS(tmp, y<h-1, tx-1, ty+1, bs); CAS(tmp, 1, tx-1, ty, bs); } __syncthreads(); } while (changes); if (y>=0 && y<h && x>=0 && x<w) { if (final) { if (in_bounds) { dImg[y*w+x] = MSK_DEF*(tmp[ty*bs+tx]==MSK_DEF); } } else { dImg[y*w+x] = max(dImg[y*w+x], tmp[ty*bs+tx]); } } } // perform canny edge detection __host__ void canny(byte *dImg, byte *dImgOut, float blurStd, float threshold1, float threshold2, int hystIters) { byte *dImgTmp; clock_t *t; int i; CUDAERR(cudaMalloc((void**)&dImgTmp, width*height), "alloc dImgTmp"); // blur(blurStd, dImg, dImgOut); blur_sep(blurStd, dImg, dImgOut); // different grid with 1-width apron for shared-memory schemes dim3 dimGrid2 = dim3(ceil(width*1./(bs-2)), ceil(height*1./(bs-2)), 1); dim3 dimBlock2 = dim3(bs, bs, 1); t = clock_start(); std::cout << "Performing Sobel filter..." << std::endl; // sobel<<<dimGrid, dimBlock>>>(dImgOut, dImg, dImgTmp, // height, width); sobel_shm<<<dimGrid2, dimBlock2>>>(dImgOut, dImg, dImgTmp, height, width); // sobel_sep<<<dimGrid2, dimBlock2>>>(dImgOut, dImg, dImgTmp, // height, width); CUDAERR(cudaGetLastError(), "launch sobel kernel"); if (doSync) { CUDAERR(cudaDeviceSynchronize(), "cudaDeviceSynchronize()"); clock_lap(t, CLK_SOBEL); } std::cout << "Performing edge thinning..." << std::endl; edge_thin<<<dimGrid, dimBlock>>>(dImg, dImgTmp, dImgOut, height, width); CUDAERR(cudaGetLastError(), "launch edge thinning kernel"); if (doSync) { CUDAERR(cudaDeviceSynchronize(), "cudaDeviceSynchronize()"); clock_lap(t, CLK_THIN); } std::cout << "Performing double thresholding..." << std::endl; edge_thin<<<dimGrid, dimBlock>>>(dImgOut, dImgTmp, height, width, 255*threshold1, 255*threshold2); CUDAERR(cudaGetLastError(), "launch double thresholding kernel"); if (doSync) { CUDAERR(cudaDeviceSynchronize(), "cudaDeviceSynchronize()"); clock_lap(t, CLK_THRES); } if (hystIters) { std::cout << "Performing hysteresis..." 
<< std::endl; for (i = 0; i < hystIters; ++i) { // hysteresis<<<dimGrid, dimBlock>>>(dImgTmp, // height, width, i==hyst_iters-1); hysteresis_shm<<<dimGrid2, dimBlock2>>>(dImgTmp, height, width, i==hystIters-1); CUDAERR(cudaGetLastError(), "launch hysteresis kernel"); if (doSync) { CUDAERR(cudaDeviceSynchronize(), "cudaDeviceSynchronize()"); clock_lap(t, CLK_HYST); } } } // TODO: remove this CUDAERR(cudaMemcpy(dImgOut, dImgTmp, width*height, cudaMemcpyDeviceToDevice), "TESTING"); // dTmp = dImg; // dImg = dImgOut; // dImgOut = dTmp; CUDAERR(cudaFree(dImgTmp), "freeing dImgTmp"); } // print timings __host__ void print_timings(void) { std::cout << "overall:\t" << clock_ave[CLK_ALL] << "s" << std::endl; // doSync off means only overall time counted if (!doSync) { return; } std::cout << "grayscale:\t" << clock_ave[CLK_GRAY] << "s" << std::endl << "blur:\t\t" << clock_ave[CLK_BLUR] << "s" << std::endl << "sobel\t\t" << clock_ave[CLK_SOBEL] << "s" << std::endl << "edgethin:\t" << clock_ave[CLK_THIN] << "s" << std::endl << "threshold:\t" << clock_ave[CLK_THRES] << "s" << std::endl << "hysteresis:\t" << clock_ave[CLK_HYST] << "s" << std::endl << "hyst total:\t" << clock_total[CLK_HYST] << "s" << std::endl; } __host__ int main(void) { std::string inFile, outFile; unsigned i, channels, rowStride, hystIters; byte *hImg, *dImg, *dImgMono, *dImgMonoOut; float blurStd, threshold1, threshold2; clock_t *tGray, *tOverall; // get image name std::cout << "Enter infile (without .png): "; std::cin >> inFile; std::cout << "Enter outfile (without .png): "; std::cin >> outFile; std::cout << "Blur stdev: "; std::cin >> blurStd; std::cout << "Threshold 1: "; std::cin >> threshold1; std::cout << "Threshold 2: "; std::cin >> threshold2; std::cout << "Hysteresis iters: "; std::cin >> hystIters; std::cout << "Sync after each kernel? "; std::cin >> doSync; inFile += ".png"; outFile += "_bs" + std::to_string(blurStd) + "_th" + std::to_string(threshold1) + "_th" + std::to_string(threshold2) + (hystIters ? "" : "_nohyst") + ".png"; // get image std::cout << "Reading image from file..." << std::endl; read_png_file(inFile.c_str()); channels = color_type==PNG_COLOR_TYPE_RGBA ? 4 : 3; rowStride = width*channels; std::cout << "Channels: " << channels << std::endl; // allocate memory std::cout << "Allocating host and device buffers..." << std::endl; hImg = (byte *)malloc(width*height*channels); CUDAERR(cudaMalloc((void **)&dImg, width*height*channels), "cudaMalloc dImg"); CUDAERR(cudaMalloc((void **)&dImgMono, width*height), "cudaMalloc dImgMono"); CUDAERR(cudaMalloc((void **)&dImgMonoOut, width*height), "cudaMalloc dImgMonoOut"); // copy image from row-pointers to device for (i = 0; i < height; ++i) { memcpy(hImg + i*rowStride, row_pointers[i], rowStride); } // copy image to device std::cout << "Copying image to device..." << std::endl; CUDAERR(cudaMemcpy(dImg, hImg, width*height*channels, cudaMemcpyHostToDevice), "cudaMemcpy to device"); // set kernel parameters (same for all future kernel invocations) // TODO: calculate best grid/block dim depending on the device dimGrid = dim3(ceil(rowStride*1./bs), ceil(height*1./bs), 1); dimBlock = dim3(bs, bs, 1); // convert to grayscale cudaDeviceSynchronize(); tOverall = clock_start(); tGray = clock_start(); std::cout << "Converting to grayscale..." 
<< std::endl; toGrayScale<<<dimGrid, dimBlock>>>(dImg, dImgMono, height, width, channels); CUDAERR(cudaGetLastError(), "launch toGrayScale kernel"); if (doSync) { cudaDeviceSynchronize(); clock_lap(tGray, CLK_GRAY); } // canny edge detection std::cout << "Performing canny edge-detection..." << std::endl; canny(dImgMono, dImgMonoOut, blurStd, threshold1, threshold2, hystIters); // convert back from grayscale tGray = clock_start(); std::cout << "Convert image back to multi-channel..." << std::endl; fromGrayScale<<<dimGrid, dimBlock>>>(dImgMonoOut, dImg, height, width, channels); CUDAERR(cudaGetLastError(), "launch fromGrayScale kernel"); cudaDeviceSynchronize(); if (doSync) { clock_lap(tGray, CLK_GRAY); } clock_lap(tOverall, CLK_ALL); // copy image back to host std::cout << "Copy image back to host..." << std::endl; CUDAERR(cudaMemcpy(hImg, dImg, width*height*channels, cudaMemcpyDeviceToHost), "cudaMemcpy to host"); // copy image back to row_pointers std::cout << "Copy image back to row_pointers..." << std::endl; for (i = 0; i < height; ++i) { memcpy(row_pointers[i], hImg + i*rowStride, rowStride); } print_timings(); // copy image back from device std::cout << "Writing image back to file..." << std::endl; write_png_file(outFile.c_str()); // freeing pointers std::cout << "Freeing device memory..." << std::endl; CUDAERR(cudaFree(dImg), "freeing dImg"); CUDAERR(cudaFree(dImgMono), "freeing dImgMono"); CUDAERR(cudaFree(dImgMonoOut), "freeing dImgMonoOut"); std::cout << "Done." << std::endl; }
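Both sobel_shm and hysteresis_shm in the two Canny files tile the image with bs x bs thread blocks that each produce only a (bs-2) x (bs-2) patch of output; the outer ring of threads exists just to load a one-pixel apron into shared memory, which is why the grid is sized as ceil(width/(bs-2)) x ceil(height/(bs-2)). A minimal sketch of the same indexing applied to a hypothetical 3x3 box filter (BS and the kernel name are placeholders, not taken from these files):

#include <cuda_runtime.h>

typedef unsigned char byte;
#define BS 16  // tile width, chosen arbitrarily for this sketch

// Launch with block = dim3(BS, BS) and
// grid = dim3(ceil(w/(BS-2.0)), ceil(h/(BS-2.0))).
__global__ void box3x3_shm(const byte *in, byte *out, int h, int w) {
    __shared__ int tile[BS * BS];

    int tx = threadIdx.x, ty = threadIdx.y;
    // Same mapping as sobel_shm/hysteresis_shm: interior threads own distinct
    // pixels, border threads reload pixels owned by the neighbouring block.
    int x = (BS - 2) * blockIdx.x + tx - 1;
    int y = (BS - 2) * blockIdx.y + ty - 1;

    if (x >= 0 && x < w && y >= 0 && y < h) {
        tile[ty * BS + tx] = in[y * w + x];
    }
    __syncthreads();

    // Only interior threads, with all eight neighbours resident in shared
    // memory and inside the image, write a result.
    if (tx >= 1 && tx < BS - 1 && ty >= 1 && ty < BS - 1 &&
        x >= 1 && x < w - 1 && y >= 1 && y < h - 1) {
        int sum = 0;
        for (int dy = -1; dy <= 1; ++dy)
            for (int dx = -1; dx <= 1; ++dx)
                sum += tile[(ty + dy) * BS + (tx + dx)];
        out[y * w + x] = (byte)(sum / 9);
    }
}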
7393db27804b669244647f67e7562ff887a35663.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>

#include "caffe/layers/crop_layer.hpp"

namespace caffe {

__device__ int compute_uncropped_index(int index, const int ndims,
                                       const int *src_strides,
                                       const int *dest_strides,
                                       const int *offsets) {
  int dest_index = index;
  int src_index = 0;
  for (int i = 0; i < ndims; ++i) {
    int coord = dest_index / dest_strides[i];
    dest_index -= coord * dest_strides[i];
    src_index += src_strides[i] * (coord + offsets[i]);
  }
  return src_index;
}

template <typename Dtype>
__global__ void crop_kernel_forward(const int nthreads, const int ndims,
                                    const int *src_strides,
                                    const int *dest_strides,
                                    const int *offsets,
                                    const Dtype *src, Dtype *dest) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int src_index = compute_uncropped_index(index, ndims, src_strides,
                                            dest_strides, offsets);
    dest[index] = src[src_index];
  }
}

template <typename Dtype>
__global__ void crop_kernel_backward(const int nthreads, const int ndims,
                                     const int *src_strides,
                                     const int *dest_strides,
                                     const int *offsets,
                                     Dtype *src, const Dtype *dest) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int src_index = compute_uncropped_index(index, ndims, src_strides,
                                            dest_strides, offsets);
    src[src_index] = dest[index];
  }
}

template <typename Dtype>
void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
                                   const vector<Blob<Dtype> *> &top) {
  const Dtype *bottom_data = bottom[0]->gpu_data();
  Dtype *top_data = top[0]->mutable_gpu_data();
  int n = top[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( crop_kernel_forward), dim3(CAFFE_GET_BLOCKS(n)),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      n, bottom[0]->num_axes(), src_strides_.gpu_data(),
      dest_strides_.gpu_data(), offsets.gpu_data(), bottom_data, top_data);
}

template <typename Dtype>
void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
                                    const vector<bool> &propagate_down,
                                    const vector<Blob<Dtype> *> &bottom) {
  const Dtype *top_diff = top[0]->gpu_diff();
  Dtype *bottom_diff = bottom[0]->mutable_gpu_diff();
  int n = top[0]->count();
  if (propagate_down[0]) {
    caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( crop_kernel_backward), dim3(CAFFE_GET_BLOCKS(n)),
        dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        n, bottom[0]->num_axes(), src_strides_.gpu_data(),
        dest_strides_.gpu_data(), offsets.gpu_data(), bottom_diff, top_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(CropLayer);

}  // namespace caffe
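compute_uncropped_index above peels the flattened destination index apart one axis at a time (slowest stride first), then re-flattens each coordinate against the source strides after adding the per-axis crop offsets. A small host-side restatement of that arithmetic, with made-up 2-D shapes and offsets, is an easy way to sanity-check it:

#include <cassert>
#include <cstdio>

// Host copy of the stride walk used by crop_kernel_forward/backward.
// Strides are elements per unit step along each axis, slowest axis first.
int uncropped_index(int index, int ndims,
                    const int *src_strides, const int *dest_strides,
                    const int *offsets) {
    int dest_index = index;
    int src_index = 0;
    for (int i = 0; i < ndims; ++i) {
        int coord = dest_index / dest_strides[i];
        dest_index -= coord * dest_strides[i];
        src_index += src_strides[i] * (coord + offsets[i]);
    }
    return src_index;
}

int main() {
    // Hypothetical example: crop a 3x4 window from a 5x6 source at offset (1, 2).
    const int src_strides[2]  = {6, 1};
    const int dest_strides[2] = {4, 1};
    const int offsets[2]      = {1, 2};

    // Destination element (row 2, col 3) has flattened index 2*4 + 3 = 11;
    // it should come from source element (3, 5), i.e. index 3*6 + 5 = 23.
    int src = uncropped_index(11, 2, src_strides, dest_strides, offsets);
    assert(src == 23);
    printf("dest index 11 -> src index %d\n", src);
    return 0;
}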
7393db27804b669244647f67e7562ff887a35663.cu
#include <vector>

#include "caffe/layers/crop_layer.hpp"

namespace caffe {

__device__ int compute_uncropped_index(int index, const int ndims,
                                       const int *src_strides,
                                       const int *dest_strides,
                                       const int *offsets) {
  int dest_index = index;
  int src_index = 0;
  for (int i = 0; i < ndims; ++i) {
    int coord = dest_index / dest_strides[i];
    dest_index -= coord * dest_strides[i];
    src_index += src_strides[i] * (coord + offsets[i]);
  }
  return src_index;
}

template <typename Dtype>
__global__ void crop_kernel_forward(const int nthreads, const int ndims,
                                    const int *src_strides,
                                    const int *dest_strides,
                                    const int *offsets,
                                    const Dtype *src, Dtype *dest) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int src_index = compute_uncropped_index(index, ndims, src_strides,
                                            dest_strides, offsets);
    dest[index] = src[src_index];
  }
}

template <typename Dtype>
__global__ void crop_kernel_backward(const int nthreads, const int ndims,
                                     const int *src_strides,
                                     const int *dest_strides,
                                     const int *offsets,
                                     Dtype *src, const Dtype *dest) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    int src_index = compute_uncropped_index(index, ndims, src_strides,
                                            dest_strides, offsets);
    src[src_index] = dest[index];
  }
}

template <typename Dtype>
void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom,
                                   const vector<Blob<Dtype> *> &top) {
  const Dtype *bottom_data = bottom[0]->gpu_data();
  Dtype *top_data = top[0]->mutable_gpu_data();
  int n = top[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  crop_kernel_forward<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
      n, bottom[0]->num_axes(), src_strides_.gpu_data(),
      dest_strides_.gpu_data(), offsets.gpu_data(), bottom_data, top_data);
}

template <typename Dtype>
void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top,
                                    const vector<bool> &propagate_down,
                                    const vector<Blob<Dtype> *> &bottom) {
  const Dtype *top_diff = top[0]->gpu_diff();
  Dtype *bottom_diff = bottom[0]->mutable_gpu_diff();
  int n = top[0]->count();
  if (propagate_down[0]) {
    caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff);
    // NOLINT_NEXT_LINE(whitespace/operators)
    crop_kernel_backward<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
        n, bottom[0]->num_axes(), src_strides_.gpu_data(),
        dest_strides_.gpu_data(), offsets.gpu_data(), bottom_diff, top_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(CropLayer);

}  // namespace caffe
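Both crop kernels iterate with Caffe's CUDA_KERNEL_LOOP macro, which is a grid-stride loop: a fixed-size grid covers any nthreads because each thread strides ahead by the total thread count. The sketch below restates that pattern without the Caffe headers; the block size, kernel name, and the macro's exact definition are assumptions here, not quotes from Caffe:

#include <cuda_runtime.h>

// Grid-stride loop in the form CUDA_KERNEL_LOOP is commonly written.
__global__ void copy_kernel(const float *src, float *dst, int n) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < n;
         i += blockDim.x * gridDim.x) {
        dst[i] = src[i];
    }
}

// Because every thread keeps striding until it passes n, the launch can cap
// the block count (as CAFFE_GET_BLOCKS does) without dropping elements.
void launch_copy(const float *src, float *dst, int n) {
    const int threads = 256;                    // assumed block size
    const int blocks = (n + threads - 1) / threads;
    copy_kernel<<<blocks, threads>>>(src, dst, n);
}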
9058bb8631d78a637bc82615e3d8b0f3cfff80a7.hip
// !!! This is a file automatically generated by hipify!!! #include "stdio.h" #include <stdlib.h> #include "math.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "teem/nrrd.h" #include "Image.h" #include "sstream" using namespace std; #define PI 3.14159265 texture<short, 3, hipReadModeNormalizedFloat> tex0; // 3D texture texture<short, 3, hipReadModeNormalizedFloat> tex1; // 3D texture hipArray *d_volumeArray0 = 0; hipArray *d_volumeArray1 = 0; // w0, w1, w2, and w3 are the four cubic B-spline basis functions __host__ __device__ float w0(float a) { return (1.0f/6.0f)*(a*(a*(-a + 3.0f) - 3.0f) + 1.0f); } __host__ __device__ float w1(float a) { return (1.0f/6.0f)*(a*a*(3.0f*a - 6.0f) + 4.0f); } __host__ __device__ float w2(float a) { return (1.0f/6.0f)*(a*(a*(-3.0f*a + 3.0f) + 3.0f) + 1.0f); } __host__ __device__ float w3(float a) { return (1.0f/6.0f)*(a*a*a); } //derivatives of basic functions __host__ __device__ float w0g(float a) { return -(1.0f/2.0f)*a*a + a - (1.0f/2.0f); } __host__ __device__ float w1g(float a) { return (3.0f/2.0f)*a*a - 2*a; } __host__ __device__ float w2g(float a) { return -(3.0f/2.0f)*a*a + a + (1.0/2.0); } __host__ __device__ float w3g(float a) { return (1.0f/2.0f)*a*a; } // filter 4 values using cubic splines template<class T> __device__ T cubicFilter(float x, T c0, T c1, T c2, T c3) { T r; r = c0 * w0(x); r += c1 * w1(x); r += c2 * w2(x); r += c3 * w3(x); return r; } //filtering with derivative of basic functions template<class T> __device__ T cubicFilter_G(float x, T c0, T c1, T c2, T c3) { T r; r = c0 * w0g(x); r += c1 * w1g(x); r += c2 * w2g(x); r += c3 * w3g(x); return r; } template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY(const texture<T, 3, hipReadModeNormalizedFloat> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter<R>(fy, cubicFilter<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } //gradient in X direction template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY_GX(const texture<T, 3, hipReadModeNormalizedFloat> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter<R>(fy, cubicFilter_G<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY_GY(const texture<T, 3, hipReadModeNormalizedFloat> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; 
float fy = y - py; return cubicFilter_G<R>(fy, cubicFilter<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } template<class T, class R> __device__ R tex3DBicubic(const texture<T, 3, hipReadModeNormalizedFloat> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY<T,R>(texref,x,y,pz-1), tex3DBicubicXY<T,R>(texref,x,y,pz), tex3DBicubicXY<T,R>(texref,x,y,pz+1), tex3DBicubicXY<T,R>(texref,x,y,pz+2) ); } template<class T, class R> __device__ R tex3DBicubic_GX(const texture<T, 3, hipReadModeNormalizedFloat> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY_GX<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GX<T,R>(texref,x,y,pz), tex3DBicubicXY_GX<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GX<T,R>(texref,x,y,pz+2) ); } template<class T, class R> __device__ R tex3DBicubic_GY(const texture<T, 3, hipReadModeNormalizedFloat> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY_GY<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GY<T,R>(texref,x,y,pz), tex3DBicubicXY_GY<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GY<T,R>(texref,x,y,pz+2) ); } template<class T, class R> __device__ R tex3DBicubic_GZ(const texture<T, 3, hipReadModeNormalizedFloat> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter_G<R>(fz, tex3DBicubicXY<T,R>(texref,x,y,pz-1), tex3DBicubicXY<T,R>(texref,x,y,pz), tex3DBicubicXY<T,R>(texref,x,y,pz+1), tex3DBicubicXY<T,R>(texref,x,y,pz+2) ); } __host__ __device__ int cu_getIndex2(int i, int j, int s1, int s2) { return i*s2+j; } __host__ __device__ double dotProduct(double *u, double *v, int s) { double result = 0; for (int i=0; i<s; i++) result += (u[i]*v[i]); return result; } __host__ __device__ double lenVec(double *a, int s) { double len = 0; for (int i=0; i<s; i++) len += (a[i]*a[i]); len = sqrt(len); return len; } void mulMatPoint(double X[4][4], double Y[4], double Z[4]) { for (int i=0; i<4; i++) Z[i] = 0; for (int i=0; i<4; i++) for (int k=0; k<4; k++) Z[i] += (X[i][k]*Y[k]); } __device__ void cu_mulMatPoint(double* X, double* Y, double* Z) { for (int i=0; i<4; i++) Z[i] = 0; for (int i=0; i<4; i++) for (int k=0; k<4; k++) Z[i] += (X[cu_getIndex2(i,k,4,4)]*Y[k]); } __device__ void cu_mulMatPoint3(double* X, double* Y, double* Z) { for (int i=0; i<3; i++) Z[i] = 0; for (int i=0; i<3; i++) for (int k=0; k<3; k++) Z[i] += (X[cu_getIndex2(i,k,3,3)]*Y[k]); } __host__ __device__ void advancePoint(double* point, double* dir, double scale, double* newpos) { for (int i=0; i<3; i++) newpos[i] = point[i]+dir[i]*scale; } __device__ bool cu_isInsideDouble(double i, double j, double k, int dim1, int dim2, int dim3) { return ((i>=0)&&(i<=(dim1-1))&&(j>=0)&&(j<=(dim2-1))&&(k>=0)&&(k<=(dim3-1))); } __device__ double cu_computeAlpha(double val, double grad_len, double isoval, double alphamax, double thickness) { if ((grad_len == 0.0) && (val == isoval)) return alphamax; else if ((grad_len>0.0) && (isoval >= (val-thickness*grad_len)) 
&& (isoval <= (val+thickness*grad_len))) return alphamax*(1-abs(isoval-val)/(grad_len*thickness)); else return 0.0; } __device__ double cu_inAlpha(double val, double grad_len, double isoval, double thickness) { if (val >= isoval) return 1.0; else { return max(0.0,(1-abs(isoval-val)/(grad_len*thickness))); } } __host__ __device__ void normalize(double *a, int s) { double len = lenVec(a,s); for (int i=0; i<s; i++) a[i] = a[i]/len; } __global__ void kernel(int* dim, int *size, double hor_extent, double ver_extent, double *center, double *viewdir, double *right, double *up, double *light_dir, double nc, double fc, double raystep, double refstep, double* mat_trans, double* mat_trans_inv, double* MT_BE_inv, double phongKa, double phongKd, double isoval, double alphamax, double thickness, int nOutChannel, double* imageDouble ) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; if ((i>=size[0]) || (j>=size[1])) return; double hor_ratio = hor_extent/size[0]; double ver_ratio = ver_extent/size[1]; int ni = i-size[0]/2; int nj = size[1]/2 - j; double startPoint1[4]; startPoint1[3] = 1; advancePoint(center,right,ni*ver_ratio,startPoint1); double startPoint2[4]; startPoint2[3] = 1; advancePoint(startPoint1,up,nj*hor_ratio,startPoint2); memcpy(startPoint1,startPoint2,4*sizeof(double)); double accColor = 0; double transp = 1; double indPoint[4]; double val; double gradi[3]; double gradw[3]; double gradw_len; //double gradi_len; double depth; double pointColor; double alpha; double mipVal = 0; double valgfp; for (double k=0; k<fc-nc; k+=raystep) { advancePoint(startPoint1,viewdir,raystep,startPoint2); cu_mulMatPoint(mat_trans_inv,startPoint1,indPoint); if (cu_isInsideDouble(indPoint[0],indPoint[1],indPoint[2],dim[1],dim[2],dim[3])) { val = tex3DBicubic<short,float>(tex1,indPoint[0],indPoint[1],indPoint[2]); gradi[0] = tex3DBicubic_GX<short,float>(tex1,indPoint[0],indPoint[1],indPoint[2]); gradi[1] = tex3DBicubic_GY<short,float>(tex1,indPoint[0],indPoint[1],indPoint[2]); gradi[2] = tex3DBicubic_GZ<short,float>(tex1,indPoint[0],indPoint[1],indPoint[2]); cu_mulMatPoint3(MT_BE_inv, gradi, gradw); gradw_len = lenVec(gradw,3); //negating and normalizing for (int l=0; l<3; l++) gradw[l] = -gradw[l]/gradw_len; depth = (k*1.0+1)/(fc*1.0-nc); pointColor = phongKa + depth*phongKd*max(0.0f,dotProduct(gradw,light_dir,3)); alpha = cu_computeAlpha(val, gradw_len, isoval, alphamax, thickness); alpha = 1 - pow(1-alpha,raystep/refstep); transp *= (1-alpha); accColor = accColor*(1-alpha) + pointColor*alpha; valgfp = tex3DBicubic<short,float>(tex0,indPoint[0],indPoint[1],indPoint[2]); mipVal = max(mipVal,valgfp*cu_inAlpha(val,gradw_len,isoval,thickness)); } memcpy(startPoint1,startPoint2,4*sizeof(double)); } double accAlpha = 1 - transp; if (accAlpha>0) { imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = accColor/accAlpha; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = mipVal; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = 0; } else { imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = accColor; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = mipVal; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = 0; } imageDouble[j*size[0]*nOutChannel+i*nOutChannel+nOutChannel-1] = accAlpha; } double calDet44(double X[][4]) { double value = ( X[0][3]*X[1][2]*X[2][1]*X[3][0] - X[0][2]*X[1][3]*X[2][1]*X[3][0] - X[0][3]*X[1][1]*X[2][2]*X[3][0] + X[0][1]*X[1][3]*X[2][2]*X[3][0]+ X[0][2]*X[1][1]*X[2][3]*X[3][0] - X[0][1]*X[1][2]*X[2][3]*X[3][0] - 
X[0][3]*X[1][2]*X[2][0]*X[3][1] + X[0][2]*X[1][3]*X[2][0]*X[3][1]+ X[0][3]*X[1][0]*X[2][2]*X[3][1] - X[0][0]*X[1][3]*X[2][2]*X[3][1] - X[0][2]*X[1][0]*X[2][3]*X[3][1] + X[0][0]*X[1][2]*X[2][3]*X[3][1]+ X[0][3]*X[1][1]*X[2][0]*X[3][2] - X[0][1]*X[1][3]*X[2][0]*X[3][2] - X[0][3]*X[1][0]*X[2][1]*X[3][2] + X[0][0]*X[1][3]*X[2][1]*X[3][2]+ X[0][1]*X[1][0]*X[2][3]*X[3][2] - X[0][0]*X[1][1]*X[2][3]*X[3][2] - X[0][2]*X[1][1]*X[2][0]*X[3][3] + X[0][1]*X[1][2]*X[2][0]*X[3][3]+ X[0][2]*X[1][0]*X[2][1]*X[3][3] - X[0][0]*X[1][2]*X[2][1]*X[3][3] - X[0][1]*X[1][0]*X[2][2]*X[3][3] + X[0][0]*X[1][1]*X[2][2]*X[3][3] ); return value; } void invertMat44(double X[][4], double Y[][4]) { double det = calDet44(X); Y[0][0] = X[1][2]*X[2][3]*X[3][1] - X[1][3]*X[2][2]*X[3][1] + X[1][3]*X[2][1]*X[3][2] - X[1][1]*X[2][3]*X[3][2] - X[1][2]*X[2][1]*X[3][3] + X[1][1]*X[2][2]*X[3][3]; Y[0][1] = X[0][3]*X[2][2]*X[3][1] - X[0][2]*X[2][3]*X[3][1] - X[0][3]*X[2][1]*X[3][2] + X[0][1]*X[2][3]*X[3][2] + X[0][2]*X[2][1]*X[3][3] - X[0][1]*X[2][2]*X[3][3]; Y[0][2] = X[0][2]*X[1][3]*X[3][1] - X[0][3]*X[1][2]*X[3][1] + X[0][3]*X[1][1]*X[3][2] - X[0][1]*X[1][3]*X[3][2] - X[0][2]*X[1][1]*X[3][3] + X[0][1]*X[1][2]*X[3][3]; Y[0][3] = X[0][3]*X[1][2]*X[2][1] - X[0][2]*X[1][3]*X[2][1] - X[0][3]*X[1][1]*X[2][2] + X[0][1]*X[1][3]*X[2][2] + X[0][2]*X[1][1]*X[2][3] - X[0][1]*X[1][2]*X[2][3]; Y[1][0] = X[1][3]*X[2][2]*X[3][0] - X[1][2]*X[2][3]*X[3][0] - X[1][3]*X[2][0]*X[3][2] + X[1][0]*X[2][3]*X[3][2] + X[1][2]*X[2][0]*X[3][3] - X[1][0]*X[2][2]*X[3][3]; Y[1][1] = X[0][2]*X[2][3]*X[3][0] - X[0][3]*X[2][2]*X[3][0] + X[0][3]*X[2][0]*X[3][2] - X[0][0]*X[2][3]*X[3][2] - X[0][2]*X[2][0]*X[3][3] + X[0][0]*X[2][2]*X[3][3]; Y[1][2] = X[0][3]*X[1][2]*X[3][0] - X[0][2]*X[1][3]*X[3][0] - X[0][3]*X[1][0]*X[3][2] + X[0][0]*X[1][3]*X[3][2] + X[0][2]*X[1][0]*X[3][3] - X[0][0]*X[1][2]*X[3][3]; Y[1][3] = X[0][2]*X[1][3]*X[2][0] - X[0][3]*X[1][2]*X[2][0] + X[0][3]*X[1][0]*X[2][2] - X[0][0]*X[1][3]*X[2][2] - X[0][2]*X[1][0]*X[2][3] + X[0][0]*X[1][2]*X[2][3]; Y[2][0] = X[1][1]*X[2][3]*X[3][0] - X[1][3]*X[2][1]*X[3][0] + X[1][3]*X[2][0]*X[3][1] - X[1][0]*X[2][3]*X[3][1] - X[1][1]*X[2][0]*X[3][3] + X[1][0]*X[2][1]*X[3][3]; Y[2][1] = X[0][3]*X[2][1]*X[3][0] - X[0][1]*X[2][3]*X[3][0] - X[0][3]*X[2][0]*X[3][1] + X[0][0]*X[2][3]*X[3][1] + X[0][1]*X[2][0]*X[3][3] - X[0][0]*X[2][1]*X[3][3]; Y[2][2] = X[0][1]*X[1][3]*X[3][0] - X[0][3]*X[1][1]*X[3][0] + X[0][3]*X[1][0]*X[3][1] - X[0][0]*X[1][3]*X[3][1] - X[0][1]*X[1][0]*X[3][3] + X[0][0]*X[1][1]*X[3][3]; Y[2][3] = X[0][3]*X[1][1]*X[2][0] - X[0][1]*X[1][3]*X[2][0] - X[0][3]*X[1][0]*X[2][1] + X[0][0]*X[1][3]*X[2][1] + X[0][1]*X[1][0]*X[2][3] - X[0][0]*X[1][1]*X[2][3]; Y[3][0] = X[1][2]*X[2][1]*X[3][0] - X[1][1]*X[2][2]*X[3][0] - X[1][2]*X[2][0]*X[3][1] + X[1][0]*X[2][2]*X[3][1] + X[1][1]*X[2][0]*X[3][2] - X[1][0]*X[2][1]*X[3][2]; Y[3][1] = X[0][1]*X[2][2]*X[3][0] - X[0][2]*X[2][1]*X[3][0] + X[0][2]*X[2][0]*X[3][1] - X[0][0]*X[2][2]*X[3][1] - X[0][1]*X[2][0]*X[3][2] + X[0][0]*X[2][1]*X[3][2]; Y[3][2] = X[0][2]*X[1][1]*X[3][0] - X[0][1]*X[1][2]*X[3][0] - X[0][2]*X[1][0]*X[3][1] + X[0][0]*X[1][2]*X[3][1] + X[0][1]*X[1][0]*X[3][2] - X[0][0]*X[1][1]*X[3][2]; Y[3][3] = X[0][1]*X[1][2]*X[2][0] - X[0][2]*X[1][1]*X[2][0] + X[0][2]*X[1][0]*X[2][1] - X[0][0]*X[1][2]*X[2][1] - X[0][1]*X[1][0]*X[2][2] + X[0][0]*X[1][1]*X[2][2]; for (int i=0; i<4; i++) for (int j=0; j<4; j++) Y[i][j] = Y[i][j]/det; } void invertMat33(double X[][3], double Y[][3]) { double det = X[0][0]* (X[1][1]* X[2][2]- X[2][1]* X[1][2])- X[0][1]* (X[1][0]* X[2][2]- 
X[1][2]* X[2][0])+ X[0][2]* (X[1][0]* X[2][1]- X[1][1]* X[2][0]); double invdet = 1 / det; Y[0][0]= (X[1][1]* X[2][2]- X[2][1]* X[1][2]) * invdet; Y[0][1]= (X[0][2]* X[2][1]- X[0][1]* X[2][2]) * invdet; Y[0][2]= (X[0][1]* X[1][2]- X[0][2]* X[1][1])* invdet; Y[1][0]= (X[1][2]* X[2][0]- X[1][0]* X[2][2])* invdet; Y[1][1]= (X[0][0]* X[2][2]- X[0][2]* X[2][0])* invdet; Y[1][2]= (X[1][0]* X[0][2]- X[0][0]* X[1][2])* invdet; Y[2][0]= (X[1][0]* X[2][1]- X[2][0]* X[1][1])* invdet; Y[2][1]= (X[2][0]* X[0][1]- X[0][0]* X[2][1])* invdet; Y[2][2]= (X[0][0]* X[1][1]- X[1][0]* X[0][1]) * invdet; } void subtractVec(double *a, double *b, double *c, int s) { for (int i=0; i<s; i++) c[i] = a[i]-b[i]; } void cross(double *u, double *v, double *w) { w[0] = u[1]*v[2]-u[2]*v[1]; w[1] = u[2]*v[0]-u[0]*v[2]; w[2] = u[0]*v[1]-u[1]*v[0]; } void negateVec(double *a, int s) { for (int i=0; i<s; i++) a[i] = -a[i]; } //s1,s2,s3: fastest to slowest void sliceImageDouble(double *input, int s1, int s2, int s3, double *output, int indS1) { for (int i=0; i<s3; i++) for (int j=0; j<s2; j++) { output[i*s2+j] = input[i*s2*s1+j*s1+indS1]*input[i*s2*s1+j*s1+s1-1]; } } unsigned char quantizeDouble(double val, double minVal, double maxVal) { return (val-minVal)*255.0/(maxVal-minVal); } //3D data, fastest to slowest void quantizeImageDouble3D(double *input, unsigned char *output, int s0, int s1, int s2) { double maxVal[4]; maxVal[0] = maxVal[1] = maxVal[2] = maxVal[3] = -(1<<15); double minVal[4]; minVal[0] = minVal[1] = minVal[2] = minVal[3] = ((1<<15) - 1); for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) for (int k=0; k<s0; k++) { if (input[i*s1*s0+j*s0+k]>maxVal[k]) maxVal[k] = input[i*s1*s0+j*s0+k]; if (input[i*s1*s0+j*s0+k]<minVal[k]) minVal[k] = input[i*s1*s0+j*s0+k]; } for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) for (int k=0; k<s0; k++) { output[i*s1*s0+j*s0+k] = quantizeDouble(input[i*s1*s0+j*s0+k],minVal[k],maxVal[k]); } } static const char *vrInfo = ("program for testing CUDA-based volume rendering"); int main(int argc, const char **argv) { setbuf(stdout, NULL); hestOpt *hopt=NULL; hestParm *hparm; airArray *mop; double fr[3], at[3], up[3], nc, fc, fov, light_dir[3], isoval, raystep, refstep, thickness, alphamax, phongKa, phongKd; int size[2]; const char *me = argv[0]; char *inName, *outName, *outNamePng; mop = airMopNew(); hparm = hestParmNew(); airMopAdd(mop, hparm, (airMopper)hestParmFree, airMopAlways); hparm->noArgsIsNoProblem = true; hestOptAdd(&hopt, "i", "nin", airTypeString, 1, 1, &inName, "parab_20_20_80.nrrd", "input volume to render"); hestOptAdd(&hopt, "fr", "from", airTypeDouble, 3, 3, fr, "-50 0 0", "look-from point"); hestOptAdd(&hopt, "at", "at", airTypeDouble, 3, 3, at, "0 0 0", "look-at point"); hestOptAdd(&hopt, "up", "up", airTypeDouble, 3, 3, up, "0 0 1", "pseudo-up vector"); hestOptAdd(&hopt, "nc", "near-clip", airTypeDouble, 1, 1, &nc, "-50", "near clipping plane"); hestOptAdd(&hopt, "fc", "far-clip", airTypeDouble, 1, 1, &fc, "50", "far clipping plane"); hestOptAdd(&hopt, "fov", "FOV", airTypeDouble, 1, 1, &fov, "10", "field-of-view"); hestOptAdd(&hopt, "ldir", "direction", airTypeDouble, 3, 3, light_dir, "-1 0 0", "direction towards light"); hestOptAdd(&hopt, "isize", "sx sy", airTypeInt, 2, 2, size, "200 200", "output image sizes"); hestOptAdd(&hopt, "iso", "iso-value", airTypeDouble, 1, 1, &isoval, "0", "iso-value"); hestOptAdd(&hopt, "step", "ray-step", airTypeDouble, 1, 1, &raystep, "0.1", "ray traversing step"); hestOptAdd(&hopt, "refstep", "ref-step", airTypeDouble, 1, 1, &refstep, 
"1", "ref-step"); hestOptAdd(&hopt, "thick", "thickness", airTypeDouble, 1, 1, &thickness, "0.5", "thickness around iso-value"); hestOptAdd(&hopt, "alpha", "max-alpha", airTypeDouble, 1, 1, &alphamax, "1", "maximum value of alpha"); hestOptAdd(&hopt, "phongKa", "phong-Ka", airTypeDouble, 1, 1, &phongKa, "0.2", "Ka value of Phong shading"); hestOptAdd(&hopt, "phongKd", "phong-Kd", airTypeDouble, 1, 1, &phongKd, "0.8", "Kd value of Phong shading"); hestOptAdd(&hopt, "o", "output", airTypeString, 1, 1, &outName, "out.nrrd", "filename for 4-channel double output"); hestOptAdd(&hopt, "op", "output", airTypeString, 1, 1, &outNamePng, "out_1.png", "filename for 1-channel 8-bit output"); hestParseOrDie(hopt, argc-1, argv+1, hparm, me, vrInfo, AIR_TRUE, AIR_TRUE, AIR_TRUE); airMopAdd(mop, hopt, (airMopper)hestOptFree, airMopAlways); airMopAdd(mop, hopt, (airMopper)hestParseFree, airMopAlways); Nrrd *nin=nrrdNew(); airMopAdd(mop, nin, (airMopper)nrrdNix, airMopAlways); NrrdIoState *nio=nrrdIoStateNew(); airMopAdd(mop, nio, (airMopper)nrrdIoStateNix, airMopAlways); nio->skipData = AIR_TRUE; if (nrrdLoad(nin, inName, nio)) { char *err = biffGetDone(NRRD); airMopAdd(mop, err, airFree, airMopAlways); printf("%s: couldn't read input header:\n%s", argv[0], err); airMopError(mop); exit(1); } printf("data will be %u-D array of %s\n", nin->dim, airEnumStr(nrrdType, nin->type)); if (4 == nin->dim && nrrdTypeShort == nin->type) { printf("4D array sizes: %u %u %u %u\n", (unsigned int)(nin->axis[0].size), (unsigned int)(nin->axis[1].size), (unsigned int)(nin->axis[2].size), (unsigned int)(nin->axis[3].size)); /* example allocation */ short *sdata = (short*)calloc(nin->axis[0].size*nin->axis[1].size *nin->axis[2].size*nin->axis[3].size, sizeof(short)); nin->data = (void*)sdata; printf("pre-allocated data at %p\n", nin->data); nio->skipData = AIR_FALSE; //nrrdInit(nin); if (nrrdLoad(nin, inName, NULL)) { char *err = biffGetDone(NRRD); airMopAdd(mop, err, airFree, airMopAlways); printf("%s: couldn't read input data:\n%s", argv[0], err); airMopError(mop); exit(1); } printf("post nrrdLoad: data at %p\n", nin->data); } else { fprintf(stderr, "didn't get 4D short array; no data allocated; fix me"); airMopError(mop); exit(1); } //process input normalize(light_dir,3); hipChannelFormatDesc channelDesc; channelDesc = hipCreateChannelDesc<short>(); /* 2-channel data will have: 4 == nin->dim 3 == nin->spaceDim */ if (4 != nin->dim || 3 != nin->spaceDim) { fprintf(stderr, "%s: need 3D array in 4D space, (not %uD in %uD)\n", argv[0], nin->dim, nin->spaceDim); airMopError(mop); exit(1); } if (nin->axis[3].size != 2) { fprintf(stderr, "%s: need the slowest axis of size 2, (not %uD)\n", argv[0], (unsigned int)nin->axis[3].size); airMopError(mop); exit(1); } double mat_trans[4][4]; mat_trans[3][0] = mat_trans[3][1] = mat_trans[3][2] = 0; mat_trans[3][3] = 1; int dim[4]; dim[0] = nin->axis[3].size; dim[1] = nin->axis[0].size; dim[2] = nin->axis[1].size; dim[3] = nin->axis[2].size; for (int i=0; i<3; i++) { for (int j=0; j<3; j++) { mat_trans[j][i] = nin->axis[i].spaceDirection[j]; } mat_trans[i][3] = nin->spaceOrigin[i]; } double mat_trans_inv[4][4]; invertMat44(mat_trans,mat_trans_inv); double vb0[4] = {0,0,0,1}; double vb1[4] = {1,0,0,1}; double vb2[4] = {0,1,0,1}; double vb3[4] = {0,0,1,1}; double ve0[4],ve1[4],ve2[4],ve3[4]; mulMatPoint(mat_trans,vb0,ve0); mulMatPoint(mat_trans,vb1,ve1); mulMatPoint(mat_trans,vb2,ve2); mulMatPoint(mat_trans,vb3,ve3); subtractVec(ve1,ve0,ve1,3); subtractVec(ve2,ve0,ve2,3); 
subtractVec(ve3,ve0,ve3,3); double MT_BE[3][3]; MT_BE[0][0] = dotProduct(vb1,ve1,3); MT_BE[0][1] = dotProduct(vb2,ve1,3); MT_BE[0][2] = dotProduct(vb3,ve1,3); MT_BE[1][0] = dotProduct(vb1,ve2,3); MT_BE[1][1] = dotProduct(vb2,ve2,3); MT_BE[1][2] = dotProduct(vb3,ve2,3); MT_BE[2][0] = dotProduct(vb1,ve3,3); MT_BE[2][1] = dotProduct(vb2,ve3,3); MT_BE[2][2] = dotProduct(vb3,ve3,3); double MT_BE_inv[3][3]; invertMat33(MT_BE,MT_BE_inv); //tex3D stuff const hipExtent volumeSize = make_hipExtent(dim[1], dim[2], dim[3]); //hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); hipMalloc3DArray(&d_volumeArray0, &channelDesc, volumeSize); hipMalloc3DArray(&d_volumeArray1, &channelDesc, volumeSize); // --- Copy data to 3D array (host to device) hipMemcpy3DParms copyParams1 = {0}; copyParams1.srcPtr = make_hipPitchedPtr((void*)(((short*)nin->data)+dim[1]*dim[2]*dim[3]), volumeSize.width*sizeof(short), volumeSize.width, volumeSize.height); copyParams1.dstArray = d_volumeArray1; copyParams1.extent = volumeSize; copyParams1.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams1); hipMemcpy3DParms copyParams0 = {0}; copyParams0.srcPtr = make_hipPitchedPtr((void*)((short*)nin->data), volumeSize.width*sizeof(short), volumeSize.width, volumeSize.height); copyParams0.dstArray = d_volumeArray0; copyParams0.extent = volumeSize; copyParams0.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams0); // --- Set texture parameters tex1.normalized = false; // access with normalized texture coordinates tex1.filterMode = hipFilterModeLinear; // linear interpolation tex1.addressMode[0] = hipAddressModeWrap; // wrap texture coordinates tex1.addressMode[1] = hipAddressModeWrap; tex1.addressMode[2] = hipAddressModeWrap; tex0.normalized = false; // access with normalized texture coordinates tex0.filterMode = hipFilterModeLinear; // linear interpolation tex0.addressMode[0] = hipAddressModeWrap; // wrap texture coordinates tex0.addressMode[1] = hipAddressModeWrap; tex0.addressMode[2] = hipAddressModeWrap; // --- Bind array to 3D texture hipBindTextureToArray(tex1, d_volumeArray1, channelDesc); hipBindTextureToArray(tex0, d_volumeArray0, channelDesc); //----------- normalize(up,3); double viewdir[3]; subtractVec(at,fr,viewdir,3); double viewdis = lenVec(viewdir,3); double ver_extent = 2*viewdis*tan((fov/2)*PI/180); double hor_extent = (ver_extent/size[1])*size[0]; normalize(viewdir,3); double nviewdir[3]; memcpy(nviewdir,viewdir,sizeof(viewdir)); negateVec(nviewdir,3); double right[3]; cross(up,nviewdir,right); normalize(right,3); //correcting the up vector cross(nviewdir,right,up); normalize(up,3); double center[3]; advancePoint(at,viewdir,nc,center); int nOutChannel = 4; double *imageDouble = new double[size[0]*size[1]*nOutChannel]; //CUDA Var int *d_dim; hipMalloc(&d_dim, sizeof(dim)); hipMemcpy(d_dim, dim, 4*sizeof(int), hipMemcpyHostToDevice); double *d_imageDouble; hipMalloc(&d_imageDouble,sizeof(double)*size[0]*size[1]*nOutChannel); int *d_size; hipMalloc(&d_size,2*sizeof(int)); hipMemcpy(d_size,size,2*sizeof(int), hipMemcpyHostToDevice); double *d_center; hipMalloc(&d_center,3*sizeof(double)); hipMemcpy(d_center,center,3*sizeof(double), hipMemcpyHostToDevice); double *d_viewdir; hipMalloc(&d_viewdir,3*sizeof(double)); hipMemcpy(d_viewdir,viewdir,3*sizeof(double), hipMemcpyHostToDevice); double *d_up; hipMalloc(&d_up,3*sizeof(double)); hipMemcpy(d_up,up,3*sizeof(double), hipMemcpyHostToDevice); double *d_right; hipMalloc(&d_right,3*sizeof(double)); hipMemcpy(d_right,right,3*sizeof(double), 
hipMemcpyHostToDevice); double *d_light_dir; hipMalloc(&d_light_dir,3*sizeof(double)); hipMemcpy(d_light_dir,light_dir,3*sizeof(double), hipMemcpyHostToDevice); double* d_mat_trans; hipMalloc(&d_mat_trans,16*sizeof(double)); hipMemcpy(d_mat_trans,&mat_trans[0][0],16*sizeof(double), hipMemcpyHostToDevice); double* d_MT_BE_inv; hipMalloc(&d_MT_BE_inv,9*sizeof(double)); hipMemcpy(d_MT_BE_inv,&MT_BE_inv[0][0],9*sizeof(double), hipMemcpyHostToDevice); double* d_mat_trans_inv; hipMalloc(&d_mat_trans_inv,16*sizeof(double)); hipMemcpy(d_mat_trans_inv,&mat_trans_inv[0][0],16*sizeof(double), hipMemcpyHostToDevice); int numThread1D = 16; dim3 threadsPerBlock(numThread1D,numThread1D); dim3 numBlocks((size[0]+numThread1D-1)/numThread1D,(size[1]+numThread1D-1)/numThread1D); hipLaunchKernelGGL(( kernel), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_dim, d_size, hor_extent, ver_extent, d_center, d_viewdir, d_right, d_up, d_light_dir, nc, fc, raystep, refstep, d_mat_trans, d_mat_trans_inv, d_MT_BE_inv, phongKa, phongKd, isoval, alphamax, thickness, nOutChannel, d_imageDouble ); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); err = hipDeviceSynchronize(); if (err != hipSuccess) printf("Error Sync: %s\n", hipGetErrorString(err)); hipMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, hipMemcpyDeviceToHost); short width = size[0]; short height = size[1]; double *imageSave = new double[size[0]*size[1]]; unsigned char *imageQuantized = new unsigned char[size[0]*size[1]*4]; quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); sliceImageDouble(imageDouble,4,size[0],size[1],imageSave,3); Nrrd *nout = nrrdNew(); Nrrd *ndbl = nrrdNew(); Nrrd *ndbl_1 = nrrdNew(); airMopAdd(mop, nout, (airMopper)nrrdNuke, airMopAlways); airMopAdd(mop, ndbl, (airMopper)nrrdNix, airMopAlways); airMopAdd(mop, ndbl_1, (airMopper)nrrdNix, airMopAlways); //printf("before saving result\n"); if (nrrdWrap_va(ndbl, imageDouble, nrrdTypeDouble, 3, static_cast<size_t>(4), static_cast<size_t>(width), static_cast<size_t>(height)) || nrrdSave(outName,ndbl,NULL) || nrrdWrap_va(ndbl_1, imageSave, nrrdTypeDouble, 2, static_cast<size_t>(width), static_cast<size_t>(height)) || nrrdQuantize(nout, ndbl_1, NULL, 8) || nrrdSave(outNamePng, nout, NULL) ) { char *err = biffGetDone(NRRD); airMopAdd(mop, err, airFree, airMopAlways); printf("%s: couldn't save output:\n%s", argv[0], err); airMopError(mop); exit(1); } airMopOkay(mop); TGAImage *img = new TGAImage(width,height); //declare a temporary color variable Colour c; //Loop through image and set all pixels to red for(int x=0; x<height; x++) for(int y=0; y<width; y++) { c.r = imageQuantized[x*width*4+y*4]; c.g = imageQuantized[x*width*4+y*4+1]; c.b = imageQuantized[x*width*4+y*4+2]; c.a = imageQuantized[x*width*4+y*4+3]; img->setPixel(c,x,y); } //write the image to disk string imagename = "test_short.tga"; img->WriteImage(imagename); for (int k=0; k<4; k++) { //Loop through image and set all pixels to red for(int x=0; x<height; x++) for(int y=0; y<width; y++) { c.r = c.g = c.b = 0; c.a = 255; switch (k) { case 0: c.r = imageQuantized[x*width*4+y*4]; break; case 1: c.g = imageQuantized[x*width*4+y*4+1]; break; case 2: c.b = imageQuantized[x*width*4+y*4+2]; break; case 3: c.a = imageQuantized[x*width*4+y*4+3]; break; } img->setPixel(c,x,y); } //write the image to disk ostringstream ss; ss << k; string imagename = "test_short_"+ss.str()+".tga"; img->WriteImage(imagename); } delete img; //cleaning up delete[] 
imageDouble; delete[] imageSave; hipFreeArray(d_volumeArray1); hipFreeArray(d_volumeArray0); hipFree(d_size); hipFree(d_right); hipFree(d_up); hipFree(d_viewdir); hipFree(d_center); hipFree(d_dim); hipFree(d_imageDouble); hipFree(d_mat_trans); hipFree(d_light_dir); hipFree(d_mat_trans_inv); hipFree(d_MT_BE_inv); return 0; }
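The bicubic texture sampling in the two volume-rendering files depends on the cubic B-spline weights w0..w3 forming a partition of unity, so cubicFilter returns a weighted average of the four neighbouring samples rather than rescaling them. A small host-side check of that property, copying the weight functions from the file above:

#include <cassert>
#include <cmath>
#include <cstdio>

// Host copies of the cubic B-spline basis functions used by cubicFilter.
static float w0(float a) { return (1.0f/6.0f)*(a*(a*(-a + 3.0f) - 3.0f) + 1.0f); }
static float w1(float a) { return (1.0f/6.0f)*(a*a*(3.0f*a - 6.0f) + 4.0f); }
static float w2(float a) { return (1.0f/6.0f)*(a*(a*(-3.0f*a + 3.0f) + 3.0f) + 1.0f); }
static float w3(float a) { return (1.0f/6.0f)*(a*a*a); }

int main() {
    // For any fractional offset a in [0, 1] the four weights sum to 1.
    for (float a = 0.0f; a <= 1.0f; a += 0.125f) {
        float s = w0(a) + w1(a) + w2(a) + w3(a);
        assert(std::fabs(s - 1.0f) < 1e-5f);
        printf("a = %.3f  weight sum = %f\n", a, s);
    }
    return 0;
}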
9058bb8631d78a637bc82615e3d8b0f3cfff80a7.cu
#include "stdio.h" #include <stdlib.h> #include "math.h" #include <cuda_runtime.h> #include <cuda.h> #include "teem/nrrd.h" #include "Image.h" #include "sstream" using namespace std; #define PI 3.14159265 texture<short, 3, cudaReadModeNormalizedFloat> tex0; // 3D texture texture<short, 3, cudaReadModeNormalizedFloat> tex1; // 3D texture cudaArray *d_volumeArray0 = 0; cudaArray *d_volumeArray1 = 0; // w0, w1, w2, and w3 are the four cubic B-spline basis functions __host__ __device__ float w0(float a) { return (1.0f/6.0f)*(a*(a*(-a + 3.0f) - 3.0f) + 1.0f); } __host__ __device__ float w1(float a) { return (1.0f/6.0f)*(a*a*(3.0f*a - 6.0f) + 4.0f); } __host__ __device__ float w2(float a) { return (1.0f/6.0f)*(a*(a*(-3.0f*a + 3.0f) + 3.0f) + 1.0f); } __host__ __device__ float w3(float a) { return (1.0f/6.0f)*(a*a*a); } //derivatives of basic functions __host__ __device__ float w0g(float a) { return -(1.0f/2.0f)*a*a + a - (1.0f/2.0f); } __host__ __device__ float w1g(float a) { return (3.0f/2.0f)*a*a - 2*a; } __host__ __device__ float w2g(float a) { return -(3.0f/2.0f)*a*a + a + (1.0/2.0); } __host__ __device__ float w3g(float a) { return (1.0f/2.0f)*a*a; } // filter 4 values using cubic splines template<class T> __device__ T cubicFilter(float x, T c0, T c1, T c2, T c3) { T r; r = c0 * w0(x); r += c1 * w1(x); r += c2 * w2(x); r += c3 * w3(x); return r; } //filtering with derivative of basic functions template<class T> __device__ T cubicFilter_G(float x, T c0, T c1, T c2, T c3) { T r; r = c0 * w0g(x); r += c1 * w1g(x); r += c2 * w2g(x); r += c3 * w3g(x); return r; } template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY(const texture<T, 3, cudaReadModeNormalizedFloat> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter<R>(fy, cubicFilter<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } //gradient in X direction template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY_GX(const texture<T, 3, cudaReadModeNormalizedFloat> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter<R>(fy, cubicFilter_G<R>(fx, tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter_G<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } template<class T, class R> // texture data type, return type __device__ R tex3DBicubicXY_GY(const texture<T, 3, cudaReadModeNormalizedFloat> texref, float x, float y, float z) { float px = floor(x); float py = floor(y); float fx = x - px; float fy = y - py; return cubicFilter_G<R>(fy, cubicFilter<R>(fx, 
tex3D(texref, px-1, py-1,z), tex3D(texref, px, py-1,z), tex3D(texref, px+1, py-1,z), tex3D(texref, px+2,py-1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py,z), tex3D(texref, px, py,z), tex3D(texref, px+1, py,z), tex3D(texref, px+2, py,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+1,z), tex3D(texref, px, py+1,z), tex3D(texref, px+1, py+1,z), tex3D(texref, px+2, py+1,z)), cubicFilter<R>(fx, tex3D(texref, px-1, py+2,z), tex3D(texref, px, py+2,z), tex3D(texref, px+1, py+2,z), tex3D(texref, px+2, py+2,z)) ); } template<class T, class R> __device__ R tex3DBicubic(const texture<T, 3, cudaReadModeNormalizedFloat> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY<T,R>(texref,x,y,pz-1), tex3DBicubicXY<T,R>(texref,x,y,pz), tex3DBicubicXY<T,R>(texref,x,y,pz+1), tex3DBicubicXY<T,R>(texref,x,y,pz+2) ); } template<class T, class R> __device__ R tex3DBicubic_GX(const texture<T, 3, cudaReadModeNormalizedFloat> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY_GX<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GX<T,R>(texref,x,y,pz), tex3DBicubicXY_GX<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GX<T,R>(texref,x,y,pz+2) ); } template<class T, class R> __device__ R tex3DBicubic_GY(const texture<T, 3, cudaReadModeNormalizedFloat> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter<R>(fz, tex3DBicubicXY_GY<T,R>(texref,x,y,pz-1), tex3DBicubicXY_GY<T,R>(texref,x,y,pz), tex3DBicubicXY_GY<T,R>(texref,x,y,pz+1), tex3DBicubicXY_GY<T,R>(texref,x,y,pz+2) ); } template<class T, class R> __device__ R tex3DBicubic_GZ(const texture<T, 3, cudaReadModeNormalizedFloat> texref, float x, float y, float z) { float pz = floor(z); float fz = z - pz; return cubicFilter_G<R>(fz, tex3DBicubicXY<T,R>(texref,x,y,pz-1), tex3DBicubicXY<T,R>(texref,x,y,pz), tex3DBicubicXY<T,R>(texref,x,y,pz+1), tex3DBicubicXY<T,R>(texref,x,y,pz+2) ); } __host__ __device__ int cu_getIndex2(int i, int j, int s1, int s2) { return i*s2+j; } __host__ __device__ double dotProduct(double *u, double *v, int s) { double result = 0; for (int i=0; i<s; i++) result += (u[i]*v[i]); return result; } __host__ __device__ double lenVec(double *a, int s) { double len = 0; for (int i=0; i<s; i++) len += (a[i]*a[i]); len = sqrt(len); return len; } void mulMatPoint(double X[4][4], double Y[4], double Z[4]) { for (int i=0; i<4; i++) Z[i] = 0; for (int i=0; i<4; i++) for (int k=0; k<4; k++) Z[i] += (X[i][k]*Y[k]); } __device__ void cu_mulMatPoint(double* X, double* Y, double* Z) { for (int i=0; i<4; i++) Z[i] = 0; for (int i=0; i<4; i++) for (int k=0; k<4; k++) Z[i] += (X[cu_getIndex2(i,k,4,4)]*Y[k]); } __device__ void cu_mulMatPoint3(double* X, double* Y, double* Z) { for (int i=0; i<3; i++) Z[i] = 0; for (int i=0; i<3; i++) for (int k=0; k<3; k++) Z[i] += (X[cu_getIndex2(i,k,3,3)]*Y[k]); } __host__ __device__ void advancePoint(double* point, double* dir, double scale, double* newpos) { for (int i=0; i<3; i++) newpos[i] = point[i]+dir[i]*scale; } __device__ bool cu_isInsideDouble(double i, double j, double k, int dim1, int dim2, int dim3) { return ((i>=0)&&(i<=(dim1-1))&&(j>=0)&&(j<=(dim2-1))&&(k>=0)&&(k<=(dim3-1))); } __device__ double cu_computeAlpha(double val, double grad_len, double isoval, double alphamax, double thickness) { if ((grad_len == 0.0) && (val == isoval)) return alphamax; else if ((grad_len>0.0) && (isoval >= (val-thickness*grad_len)) && (isoval <= (val+thickness*grad_len))) return 
alphamax*(1-abs(isoval-val)/(grad_len*thickness)); else return 0.0; } __device__ double cu_inAlpha(double val, double grad_len, double isoval, double thickness) { if (val >= isoval) return 1.0; else { return max(0.0,(1-abs(isoval-val)/(grad_len*thickness))); } } __host__ __device__ void normalize(double *a, int s) { double len = lenVec(a,s); for (int i=0; i<s; i++) a[i] = a[i]/len; } __global__ void kernel(int* dim, int *size, double hor_extent, double ver_extent, double *center, double *viewdir, double *right, double *up, double *light_dir, double nc, double fc, double raystep, double refstep, double* mat_trans, double* mat_trans_inv, double* MT_BE_inv, double phongKa, double phongKd, double isoval, double alphamax, double thickness, int nOutChannel, double* imageDouble ) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; if ((i>=size[0]) || (j>=size[1])) return; double hor_ratio = hor_extent/size[0]; double ver_ratio = ver_extent/size[1]; int ni = i-size[0]/2; int nj = size[1]/2 - j; double startPoint1[4]; startPoint1[3] = 1; advancePoint(center,right,ni*ver_ratio,startPoint1); double startPoint2[4]; startPoint2[3] = 1; advancePoint(startPoint1,up,nj*hor_ratio,startPoint2); memcpy(startPoint1,startPoint2,4*sizeof(double)); double accColor = 0; double transp = 1; double indPoint[4]; double val; double gradi[3]; double gradw[3]; double gradw_len; //double gradi_len; double depth; double pointColor; double alpha; double mipVal = 0; double valgfp; for (double k=0; k<fc-nc; k+=raystep) { advancePoint(startPoint1,viewdir,raystep,startPoint2); cu_mulMatPoint(mat_trans_inv,startPoint1,indPoint); if (cu_isInsideDouble(indPoint[0],indPoint[1],indPoint[2],dim[1],dim[2],dim[3])) { val = tex3DBicubic<short,float>(tex1,indPoint[0],indPoint[1],indPoint[2]); gradi[0] = tex3DBicubic_GX<short,float>(tex1,indPoint[0],indPoint[1],indPoint[2]); gradi[1] = tex3DBicubic_GY<short,float>(tex1,indPoint[0],indPoint[1],indPoint[2]); gradi[2] = tex3DBicubic_GZ<short,float>(tex1,indPoint[0],indPoint[1],indPoint[2]); cu_mulMatPoint3(MT_BE_inv, gradi, gradw); gradw_len = lenVec(gradw,3); //negating and normalizing for (int l=0; l<3; l++) gradw[l] = -gradw[l]/gradw_len; depth = (k*1.0+1)/(fc*1.0-nc); pointColor = phongKa + depth*phongKd*max(0.0f,dotProduct(gradw,light_dir,3)); alpha = cu_computeAlpha(val, gradw_len, isoval, alphamax, thickness); alpha = 1 - pow(1-alpha,raystep/refstep); transp *= (1-alpha); accColor = accColor*(1-alpha) + pointColor*alpha; valgfp = tex3DBicubic<short,float>(tex0,indPoint[0],indPoint[1],indPoint[2]); mipVal = max(mipVal,valgfp*cu_inAlpha(val,gradw_len,isoval,thickness)); } memcpy(startPoint1,startPoint2,4*sizeof(double)); } double accAlpha = 1 - transp; if (accAlpha>0) { imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = accColor/accAlpha; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = mipVal; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = 0; } else { imageDouble[j*size[0]*nOutChannel+i*nOutChannel] = accColor; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+1] = mipVal; imageDouble[j*size[0]*nOutChannel+i*nOutChannel+2] = 0; } imageDouble[j*size[0]*nOutChannel+i*nOutChannel+nOutChannel-1] = accAlpha; } double calDet44(double X[][4]) { double value = ( X[0][3]*X[1][2]*X[2][1]*X[3][0] - X[0][2]*X[1][3]*X[2][1]*X[3][0] - X[0][3]*X[1][1]*X[2][2]*X[3][0] + X[0][1]*X[1][3]*X[2][2]*X[3][0]+ X[0][2]*X[1][1]*X[2][3]*X[3][0] - X[0][1]*X[1][2]*X[2][3]*X[3][0] - X[0][3]*X[1][2]*X[2][0]*X[3][1] + X[0][2]*X[1][3]*X[2][0]*X[3][1]+ 
X[0][3]*X[1][0]*X[2][2]*X[3][1] - X[0][0]*X[1][3]*X[2][2]*X[3][1] - X[0][2]*X[1][0]*X[2][3]*X[3][1] + X[0][0]*X[1][2]*X[2][3]*X[3][1]+ X[0][3]*X[1][1]*X[2][0]*X[3][2] - X[0][1]*X[1][3]*X[2][0]*X[3][2] - X[0][3]*X[1][0]*X[2][1]*X[3][2] + X[0][0]*X[1][3]*X[2][1]*X[3][2]+ X[0][1]*X[1][0]*X[2][3]*X[3][2] - X[0][0]*X[1][1]*X[2][3]*X[3][2] - X[0][2]*X[1][1]*X[2][0]*X[3][3] + X[0][1]*X[1][2]*X[2][0]*X[3][3]+ X[0][2]*X[1][0]*X[2][1]*X[3][3] - X[0][0]*X[1][2]*X[2][1]*X[3][3] - X[0][1]*X[1][0]*X[2][2]*X[3][3] + X[0][0]*X[1][1]*X[2][2]*X[3][3] ); return value; } void invertMat44(double X[][4], double Y[][4]) { double det = calDet44(X); Y[0][0] = X[1][2]*X[2][3]*X[3][1] - X[1][3]*X[2][2]*X[3][1] + X[1][3]*X[2][1]*X[3][2] - X[1][1]*X[2][3]*X[3][2] - X[1][2]*X[2][1]*X[3][3] + X[1][1]*X[2][2]*X[3][3]; Y[0][1] = X[0][3]*X[2][2]*X[3][1] - X[0][2]*X[2][3]*X[3][1] - X[0][3]*X[2][1]*X[3][2] + X[0][1]*X[2][3]*X[3][2] + X[0][2]*X[2][1]*X[3][3] - X[0][1]*X[2][2]*X[3][3]; Y[0][2] = X[0][2]*X[1][3]*X[3][1] - X[0][3]*X[1][2]*X[3][1] + X[0][3]*X[1][1]*X[3][2] - X[0][1]*X[1][3]*X[3][2] - X[0][2]*X[1][1]*X[3][3] + X[0][1]*X[1][2]*X[3][3]; Y[0][3] = X[0][3]*X[1][2]*X[2][1] - X[0][2]*X[1][3]*X[2][1] - X[0][3]*X[1][1]*X[2][2] + X[0][1]*X[1][3]*X[2][2] + X[0][2]*X[1][1]*X[2][3] - X[0][1]*X[1][2]*X[2][3]; Y[1][0] = X[1][3]*X[2][2]*X[3][0] - X[1][2]*X[2][3]*X[3][0] - X[1][3]*X[2][0]*X[3][2] + X[1][0]*X[2][3]*X[3][2] + X[1][2]*X[2][0]*X[3][3] - X[1][0]*X[2][2]*X[3][3]; Y[1][1] = X[0][2]*X[2][3]*X[3][0] - X[0][3]*X[2][2]*X[3][0] + X[0][3]*X[2][0]*X[3][2] - X[0][0]*X[2][3]*X[3][2] - X[0][2]*X[2][0]*X[3][3] + X[0][0]*X[2][2]*X[3][3]; Y[1][2] = X[0][3]*X[1][2]*X[3][0] - X[0][2]*X[1][3]*X[3][0] - X[0][3]*X[1][0]*X[3][2] + X[0][0]*X[1][3]*X[3][2] + X[0][2]*X[1][0]*X[3][3] - X[0][0]*X[1][2]*X[3][3]; Y[1][3] = X[0][2]*X[1][3]*X[2][0] - X[0][3]*X[1][2]*X[2][0] + X[0][3]*X[1][0]*X[2][2] - X[0][0]*X[1][3]*X[2][2] - X[0][2]*X[1][0]*X[2][3] + X[0][0]*X[1][2]*X[2][3]; Y[2][0] = X[1][1]*X[2][3]*X[3][0] - X[1][3]*X[2][1]*X[3][0] + X[1][3]*X[2][0]*X[3][1] - X[1][0]*X[2][3]*X[3][1] - X[1][1]*X[2][0]*X[3][3] + X[1][0]*X[2][1]*X[3][3]; Y[2][1] = X[0][3]*X[2][1]*X[3][0] - X[0][1]*X[2][3]*X[3][0] - X[0][3]*X[2][0]*X[3][1] + X[0][0]*X[2][3]*X[3][1] + X[0][1]*X[2][0]*X[3][3] - X[0][0]*X[2][1]*X[3][3]; Y[2][2] = X[0][1]*X[1][3]*X[3][0] - X[0][3]*X[1][1]*X[3][0] + X[0][3]*X[1][0]*X[3][1] - X[0][0]*X[1][3]*X[3][1] - X[0][1]*X[1][0]*X[3][3] + X[0][0]*X[1][1]*X[3][3]; Y[2][3] = X[0][3]*X[1][1]*X[2][0] - X[0][1]*X[1][3]*X[2][0] - X[0][3]*X[1][0]*X[2][1] + X[0][0]*X[1][3]*X[2][1] + X[0][1]*X[1][0]*X[2][3] - X[0][0]*X[1][1]*X[2][3]; Y[3][0] = X[1][2]*X[2][1]*X[3][0] - X[1][1]*X[2][2]*X[3][0] - X[1][2]*X[2][0]*X[3][1] + X[1][0]*X[2][2]*X[3][1] + X[1][1]*X[2][0]*X[3][2] - X[1][0]*X[2][1]*X[3][2]; Y[3][1] = X[0][1]*X[2][2]*X[3][0] - X[0][2]*X[2][1]*X[3][0] + X[0][2]*X[2][0]*X[3][1] - X[0][0]*X[2][2]*X[3][1] - X[0][1]*X[2][0]*X[3][2] + X[0][0]*X[2][1]*X[3][2]; Y[3][2] = X[0][2]*X[1][1]*X[3][0] - X[0][1]*X[1][2]*X[3][0] - X[0][2]*X[1][0]*X[3][1] + X[0][0]*X[1][2]*X[3][1] + X[0][1]*X[1][0]*X[3][2] - X[0][0]*X[1][1]*X[3][2]; Y[3][3] = X[0][1]*X[1][2]*X[2][0] - X[0][2]*X[1][1]*X[2][0] + X[0][2]*X[1][0]*X[2][1] - X[0][0]*X[1][2]*X[2][1] - X[0][1]*X[1][0]*X[2][2] + X[0][0]*X[1][1]*X[2][2]; for (int i=0; i<4; i++) for (int j=0; j<4; j++) Y[i][j] = Y[i][j]/det; } void invertMat33(double X[][3], double Y[][3]) { double det = X[0][0]* (X[1][1]* X[2][2]- X[2][1]* X[1][2])- X[0][1]* (X[1][0]* X[2][2]- X[1][2]* X[2][0])+ X[0][2]* (X[1][0]* X[2][1]- X[1][1]* X[2][0]); 
double invdet = 1 / det; Y[0][0]= (X[1][1]* X[2][2]- X[2][1]* X[1][2]) * invdet; Y[0][1]= (X[0][2]* X[2][1]- X[0][1]* X[2][2]) * invdet; Y[0][2]= (X[0][1]* X[1][2]- X[0][2]* X[1][1])* invdet; Y[1][0]= (X[1][2]* X[2][0]- X[1][0]* X[2][2])* invdet; Y[1][1]= (X[0][0]* X[2][2]- X[0][2]* X[2][0])* invdet; Y[1][2]= (X[1][0]* X[0][2]- X[0][0]* X[1][2])* invdet; Y[2][0]= (X[1][0]* X[2][1]- X[2][0]* X[1][1])* invdet; Y[2][1]= (X[2][0]* X[0][1]- X[0][0]* X[2][1])* invdet; Y[2][2]= (X[0][0]* X[1][1]- X[1][0]* X[0][1]) * invdet; } void subtractVec(double *a, double *b, double *c, int s) { for (int i=0; i<s; i++) c[i] = a[i]-b[i]; } void cross(double *u, double *v, double *w) { w[0] = u[1]*v[2]-u[2]*v[1]; w[1] = u[2]*v[0]-u[0]*v[2]; w[2] = u[0]*v[1]-u[1]*v[0]; } void negateVec(double *a, int s) { for (int i=0; i<s; i++) a[i] = -a[i]; } //s1,s2,s3: fastest to slowest void sliceImageDouble(double *input, int s1, int s2, int s3, double *output, int indS1) { for (int i=0; i<s3; i++) for (int j=0; j<s2; j++) { output[i*s2+j] = input[i*s2*s1+j*s1+indS1]*input[i*s2*s1+j*s1+s1-1]; } } unsigned char quantizeDouble(double val, double minVal, double maxVal) { return (val-minVal)*255.0/(maxVal-minVal); } //3D data, fastest to slowest void quantizeImageDouble3D(double *input, unsigned char *output, int s0, int s1, int s2) { double maxVal[4]; maxVal[0] = maxVal[1] = maxVal[2] = maxVal[3] = -(1<<15); double minVal[4]; minVal[0] = minVal[1] = minVal[2] = minVal[3] = ((1<<15) - 1); for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) for (int k=0; k<s0; k++) { if (input[i*s1*s0+j*s0+k]>maxVal[k]) maxVal[k] = input[i*s1*s0+j*s0+k]; if (input[i*s1*s0+j*s0+k]<minVal[k]) minVal[k] = input[i*s1*s0+j*s0+k]; } for (int i=0; i<s2; i++) for (int j=0; j<s1; j++) for (int k=0; k<s0; k++) { output[i*s1*s0+j*s0+k] = quantizeDouble(input[i*s1*s0+j*s0+k],minVal[k],maxVal[k]); } } static const char *vrInfo = ("program for testing CUDA-based volume rendering"); int main(int argc, const char **argv) { setbuf(stdout, NULL); hestOpt *hopt=NULL; hestParm *hparm; airArray *mop; double fr[3], at[3], up[3], nc, fc, fov, light_dir[3], isoval, raystep, refstep, thickness, alphamax, phongKa, phongKd; int size[2]; const char *me = argv[0]; char *inName, *outName, *outNamePng; mop = airMopNew(); hparm = hestParmNew(); airMopAdd(mop, hparm, (airMopper)hestParmFree, airMopAlways); hparm->noArgsIsNoProblem = true; hestOptAdd(&hopt, "i", "nin", airTypeString, 1, 1, &inName, "parab_20_20_80.nrrd", "input volume to render"); hestOptAdd(&hopt, "fr", "from", airTypeDouble, 3, 3, fr, "-50 0 0", "look-from point"); hestOptAdd(&hopt, "at", "at", airTypeDouble, 3, 3, at, "0 0 0", "look-at point"); hestOptAdd(&hopt, "up", "up", airTypeDouble, 3, 3, up, "0 0 1", "pseudo-up vector"); hestOptAdd(&hopt, "nc", "near-clip", airTypeDouble, 1, 1, &nc, "-50", "near clipping plane"); hestOptAdd(&hopt, "fc", "far-clip", airTypeDouble, 1, 1, &fc, "50", "far clipping plane"); hestOptAdd(&hopt, "fov", "FOV", airTypeDouble, 1, 1, &fov, "10", "field-of-view"); hestOptAdd(&hopt, "ldir", "direction", airTypeDouble, 3, 3, light_dir, "-1 0 0", "direction towards light"); hestOptAdd(&hopt, "isize", "sx sy", airTypeInt, 2, 2, size, "200 200", "output image sizes"); hestOptAdd(&hopt, "iso", "iso-value", airTypeDouble, 1, 1, &isoval, "0", "iso-value"); hestOptAdd(&hopt, "step", "ray-step", airTypeDouble, 1, 1, &raystep, "0.1", "ray traversing step"); hestOptAdd(&hopt, "refstep", "ref-step", airTypeDouble, 1, 1, &refstep, "1", "ref-step"); hestOptAdd(&hopt, "thick", "thickness", 
airTypeDouble, 1, 1, &thickness, "0.5", "thickness around iso-value"); hestOptAdd(&hopt, "alpha", "max-alpha", airTypeDouble, 1, 1, &alphamax, "1", "maximum value of alpha"); hestOptAdd(&hopt, "phongKa", "phong-Ka", airTypeDouble, 1, 1, &phongKa, "0.2", "Ka value of Phong shading"); hestOptAdd(&hopt, "phongKd", "phong-Kd", airTypeDouble, 1, 1, &phongKd, "0.8", "Kd value of Phong shading"); hestOptAdd(&hopt, "o", "output", airTypeString, 1, 1, &outName, "out.nrrd", "filename for 4-channel double output"); hestOptAdd(&hopt, "op", "output", airTypeString, 1, 1, &outNamePng, "out_1.png", "filename for 1-channel 8-bit output"); hestParseOrDie(hopt, argc-1, argv+1, hparm, me, vrInfo, AIR_TRUE, AIR_TRUE, AIR_TRUE); airMopAdd(mop, hopt, (airMopper)hestOptFree, airMopAlways); airMopAdd(mop, hopt, (airMopper)hestParseFree, airMopAlways); Nrrd *nin=nrrdNew(); airMopAdd(mop, nin, (airMopper)nrrdNix, airMopAlways); NrrdIoState *nio=nrrdIoStateNew(); airMopAdd(mop, nio, (airMopper)nrrdIoStateNix, airMopAlways); nio->skipData = AIR_TRUE; if (nrrdLoad(nin, inName, nio)) { char *err = biffGetDone(NRRD); airMopAdd(mop, err, airFree, airMopAlways); printf("%s: couldn't read input header:\n%s", argv[0], err); airMopError(mop); exit(1); } printf("data will be %u-D array of %s\n", nin->dim, airEnumStr(nrrdType, nin->type)); if (4 == nin->dim && nrrdTypeShort == nin->type) { printf("4D array sizes: %u %u %u %u\n", (unsigned int)(nin->axis[0].size), (unsigned int)(nin->axis[1].size), (unsigned int)(nin->axis[2].size), (unsigned int)(nin->axis[3].size)); /* example allocation */ short *sdata = (short*)calloc(nin->axis[0].size*nin->axis[1].size *nin->axis[2].size*nin->axis[3].size, sizeof(short)); nin->data = (void*)sdata; printf("pre-allocated data at %p\n", nin->data); nio->skipData = AIR_FALSE; //nrrdInit(nin); if (nrrdLoad(nin, inName, NULL)) { char *err = biffGetDone(NRRD); airMopAdd(mop, err, airFree, airMopAlways); printf("%s: couldn't read input data:\n%s", argv[0], err); airMopError(mop); exit(1); } printf("post nrrdLoad: data at %p\n", nin->data); } else { fprintf(stderr, "didn't get 4D short array; no data allocated; fix me"); airMopError(mop); exit(1); } //process input normalize(light_dir,3); cudaChannelFormatDesc channelDesc; channelDesc = cudaCreateChannelDesc<short>(); /* 2-channel data will have: 4 == nin->dim 3 == nin->spaceDim */ if (4 != nin->dim || 3 != nin->spaceDim) { fprintf(stderr, "%s: need 3D array in 4D space, (not %uD in %uD)\n", argv[0], nin->dim, nin->spaceDim); airMopError(mop); exit(1); } if (nin->axis[3].size != 2) { fprintf(stderr, "%s: need the slowest axis of size 2, (not %uD)\n", argv[0], (unsigned int)nin->axis[3].size); airMopError(mop); exit(1); } double mat_trans[4][4]; mat_trans[3][0] = mat_trans[3][1] = mat_trans[3][2] = 0; mat_trans[3][3] = 1; int dim[4]; dim[0] = nin->axis[3].size; dim[1] = nin->axis[0].size; dim[2] = nin->axis[1].size; dim[3] = nin->axis[2].size; for (int i=0; i<3; i++) { for (int j=0; j<3; j++) { mat_trans[j][i] = nin->axis[i].spaceDirection[j]; } mat_trans[i][3] = nin->spaceOrigin[i]; } double mat_trans_inv[4][4]; invertMat44(mat_trans,mat_trans_inv); double vb0[4] = {0,0,0,1}; double vb1[4] = {1,0,0,1}; double vb2[4] = {0,1,0,1}; double vb3[4] = {0,0,1,1}; double ve0[4],ve1[4],ve2[4],ve3[4]; mulMatPoint(mat_trans,vb0,ve0); mulMatPoint(mat_trans,vb1,ve1); mulMatPoint(mat_trans,vb2,ve2); mulMatPoint(mat_trans,vb3,ve3); subtractVec(ve1,ve0,ve1,3); subtractVec(ve2,ve0,ve2,3); subtractVec(ve3,ve0,ve3,3); double MT_BE[3][3]; MT_BE[0][0] = 
dotProduct(vb1,ve1,3); MT_BE[0][1] = dotProduct(vb2,ve1,3); MT_BE[0][2] = dotProduct(vb3,ve1,3); MT_BE[1][0] = dotProduct(vb1,ve2,3); MT_BE[1][1] = dotProduct(vb2,ve2,3); MT_BE[1][2] = dotProduct(vb3,ve2,3); MT_BE[2][0] = dotProduct(vb1,ve3,3); MT_BE[2][1] = dotProduct(vb2,ve3,3); MT_BE[2][2] = dotProduct(vb3,ve3,3); double MT_BE_inv[3][3]; invertMat33(MT_BE,MT_BE_inv); //tex3D stuff const cudaExtent volumeSize = make_cudaExtent(dim[1], dim[2], dim[3]); //cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); cudaMalloc3DArray(&d_volumeArray0, &channelDesc, volumeSize); cudaMalloc3DArray(&d_volumeArray1, &channelDesc, volumeSize); // --- Copy data to 3D array (host to device) cudaMemcpy3DParms copyParams1 = {0}; copyParams1.srcPtr = make_cudaPitchedPtr((void*)(((short*)nin->data)+dim[1]*dim[2]*dim[3]), volumeSize.width*sizeof(short), volumeSize.width, volumeSize.height); copyParams1.dstArray = d_volumeArray1; copyParams1.extent = volumeSize; copyParams1.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams1); cudaMemcpy3DParms copyParams0 = {0}; copyParams0.srcPtr = make_cudaPitchedPtr((void*)((short*)nin->data), volumeSize.width*sizeof(short), volumeSize.width, volumeSize.height); copyParams0.dstArray = d_volumeArray0; copyParams0.extent = volumeSize; copyParams0.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams0); // --- Set texture parameters tex1.normalized = false; // access with normalized texture coordinates tex1.filterMode = cudaFilterModeLinear; // linear interpolation tex1.addressMode[0] = cudaAddressModeWrap; // wrap texture coordinates tex1.addressMode[1] = cudaAddressModeWrap; tex1.addressMode[2] = cudaAddressModeWrap; tex0.normalized = false; // access with normalized texture coordinates tex0.filterMode = cudaFilterModeLinear; // linear interpolation tex0.addressMode[0] = cudaAddressModeWrap; // wrap texture coordinates tex0.addressMode[1] = cudaAddressModeWrap; tex0.addressMode[2] = cudaAddressModeWrap; // --- Bind array to 3D texture cudaBindTextureToArray(tex1, d_volumeArray1, channelDesc); cudaBindTextureToArray(tex0, d_volumeArray0, channelDesc); //----------- normalize(up,3); double viewdir[3]; subtractVec(at,fr,viewdir,3); double viewdis = lenVec(viewdir,3); double ver_extent = 2*viewdis*tan((fov/2)*PI/180); double hor_extent = (ver_extent/size[1])*size[0]; normalize(viewdir,3); double nviewdir[3]; memcpy(nviewdir,viewdir,sizeof(viewdir)); negateVec(nviewdir,3); double right[3]; cross(up,nviewdir,right); normalize(right,3); //correcting the up vector cross(nviewdir,right,up); normalize(up,3); double center[3]; advancePoint(at,viewdir,nc,center); int nOutChannel = 4; double *imageDouble = new double[size[0]*size[1]*nOutChannel]; //CUDA Var int *d_dim; cudaMalloc(&d_dim, sizeof(dim)); cudaMemcpy(d_dim, dim, 4*sizeof(int), cudaMemcpyHostToDevice); double *d_imageDouble; cudaMalloc(&d_imageDouble,sizeof(double)*size[0]*size[1]*nOutChannel); int *d_size; cudaMalloc(&d_size,2*sizeof(int)); cudaMemcpy(d_size,size,2*sizeof(int), cudaMemcpyHostToDevice); double *d_center; cudaMalloc(&d_center,3*sizeof(double)); cudaMemcpy(d_center,center,3*sizeof(double), cudaMemcpyHostToDevice); double *d_viewdir; cudaMalloc(&d_viewdir,3*sizeof(double)); cudaMemcpy(d_viewdir,viewdir,3*sizeof(double), cudaMemcpyHostToDevice); double *d_up; cudaMalloc(&d_up,3*sizeof(double)); cudaMemcpy(d_up,up,3*sizeof(double), cudaMemcpyHostToDevice); double *d_right; cudaMalloc(&d_right,3*sizeof(double)); cudaMemcpy(d_right,right,3*sizeof(double), cudaMemcpyHostToDevice); double 
*d_light_dir; cudaMalloc(&d_light_dir,3*sizeof(double)); cudaMemcpy(d_light_dir,light_dir,3*sizeof(double), cudaMemcpyHostToDevice); double* d_mat_trans; cudaMalloc(&d_mat_trans,16*sizeof(double)); cudaMemcpy(d_mat_trans,&mat_trans[0][0],16*sizeof(double), cudaMemcpyHostToDevice); double* d_MT_BE_inv; cudaMalloc(&d_MT_BE_inv,9*sizeof(double)); cudaMemcpy(d_MT_BE_inv,&MT_BE_inv[0][0],9*sizeof(double), cudaMemcpyHostToDevice); double* d_mat_trans_inv; cudaMalloc(&d_mat_trans_inv,16*sizeof(double)); cudaMemcpy(d_mat_trans_inv,&mat_trans_inv[0][0],16*sizeof(double), cudaMemcpyHostToDevice); int numThread1D = 16; dim3 threadsPerBlock(numThread1D,numThread1D); dim3 numBlocks((size[0]+numThread1D-1)/numThread1D,(size[1]+numThread1D-1)/numThread1D); kernel<<<numBlocks,threadsPerBlock>>>(d_dim, d_size, hor_extent, ver_extent, d_center, d_viewdir, d_right, d_up, d_light_dir, nc, fc, raystep, refstep, d_mat_trans, d_mat_trans_inv, d_MT_BE_inv, phongKa, phongKd, isoval, alphamax, thickness, nOutChannel, d_imageDouble ); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); err = cudaDeviceSynchronize(); if (err != cudaSuccess) printf("Error Sync: %s\n", cudaGetErrorString(err)); cudaMemcpy(imageDouble, d_imageDouble, sizeof(double)*size[0]*size[1]*nOutChannel, cudaMemcpyDeviceToHost); short width = size[0]; short height = size[1]; double *imageSave = new double[size[0]*size[1]]; unsigned char *imageQuantized = new unsigned char[size[0]*size[1]*4]; quantizeImageDouble3D(imageDouble,imageQuantized,4,size[0],size[1]); sliceImageDouble(imageDouble,4,size[0],size[1],imageSave,3); Nrrd *nout = nrrdNew(); Nrrd *ndbl = nrrdNew(); Nrrd *ndbl_1 = nrrdNew(); airMopAdd(mop, nout, (airMopper)nrrdNuke, airMopAlways); airMopAdd(mop, ndbl, (airMopper)nrrdNix, airMopAlways); airMopAdd(mop, ndbl_1, (airMopper)nrrdNix, airMopAlways); //printf("before saving result\n"); if (nrrdWrap_va(ndbl, imageDouble, nrrdTypeDouble, 3, static_cast<size_t>(4), static_cast<size_t>(width), static_cast<size_t>(height)) || nrrdSave(outName,ndbl,NULL) || nrrdWrap_va(ndbl_1, imageSave, nrrdTypeDouble, 2, static_cast<size_t>(width), static_cast<size_t>(height)) || nrrdQuantize(nout, ndbl_1, NULL, 8) || nrrdSave(outNamePng, nout, NULL) ) { char *err = biffGetDone(NRRD); airMopAdd(mop, err, airFree, airMopAlways); printf("%s: couldn't save output:\n%s", argv[0], err); airMopError(mop); exit(1); } airMopOkay(mop); TGAImage *img = new TGAImage(width,height); //declare a temporary color variable Colour c; //Loop through image and set all pixels to red for(int x=0; x<height; x++) for(int y=0; y<width; y++) { c.r = imageQuantized[x*width*4+y*4]; c.g = imageQuantized[x*width*4+y*4+1]; c.b = imageQuantized[x*width*4+y*4+2]; c.a = imageQuantized[x*width*4+y*4+3]; img->setPixel(c,x,y); } //write the image to disk string imagename = "test_short.tga"; img->WriteImage(imagename); for (int k=0; k<4; k++) { //Loop through image and set all pixels to red for(int x=0; x<height; x++) for(int y=0; y<width; y++) { c.r = c.g = c.b = 0; c.a = 255; switch (k) { case 0: c.r = imageQuantized[x*width*4+y*4]; break; case 1: c.g = imageQuantized[x*width*4+y*4+1]; break; case 2: c.b = imageQuantized[x*width*4+y*4+2]; break; case 3: c.a = imageQuantized[x*width*4+y*4+3]; break; } img->setPixel(c,x,y); } //write the image to disk ostringstream ss; ss << k; string imagename = "test_short_"+ss.str()+".tga"; img->WriteImage(imagename); } delete img; //cleaning up delete[] imageDouble; delete[] imageSave; 
cudaFreeArray(d_volumeArray1); cudaFreeArray(d_volumeArray0); cudaFree(d_size); cudaFree(d_right); cudaFree(d_up); cudaFree(d_viewdir); cudaFree(d_center); cudaFree(d_dim); cudaFree(d_imageDouble); cudaFree(d_mat_trans); cudaFree(d_light_dir); cudaFree(d_mat_trans_inv); cudaFree(d_MT_BE_inv); return 0; }
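The tricubic texture sampling above is built by nesting the same four-tap 1D cubic B-spline filter along x, then y, then z. The following host-side sketch is an illustrative addition, not part of the renderer above (all names in it are ad hoc): it re-evaluates the w0..w3 weights and checks two properties that make the interpolation well behaved, namely that the weights form a partition of unity and that filtering a constant signal returns that constant.

#include <cstdio>

// Same cubic B-spline basis weights as in the file above, evaluated on the host.
static float w0(float a) { return (1.0f/6.0f)*(a*(a*(-a + 3.0f) - 3.0f) + 1.0f); }
static float w1(float a) { return (1.0f/6.0f)*(a*a*(3.0f*a - 6.0f) + 4.0f); }
static float w2(float a) { return (1.0f/6.0f)*(a*(a*(-3.0f*a + 3.0f) + 3.0f) + 1.0f); }
static float w3(float a) { return (1.0f/6.0f)*(a*a*a); }

// 1D cubic filtering of four consecutive samples c0..c3 at fractional offset x,
// mirroring the device-side cubicFilter<T>().
static float cubicFilter1D(float x, float c0, float c1, float c2, float c3) {
    return c0*w0(x) + c1*w1(x) + c2*w2(x) + c3*w3(x);
}

int main() {
    // Partition of unity: the four weights sum to 1 for any offset in [0,1).
    for (float x = 0.0f; x < 1.0f; x += 0.25f) {
        float s = w0(x) + w1(x) + w2(x) + w3(x);
        printf("x=%.2f  sum(w)=%.6f\n", x, s);   // expect 1.000000
    }
    // Filtering a constant signal reproduces the constant.
    printf("filter(0.4, 5,5,5,5) = %.6f\n", cubicFilter1D(0.4f, 5.0f, 5.0f, 5.0f, 5.0f));
    return 0;
}

The derivative weights w0g..w3g used for the gradient passes are the analytic derivatives of these basis functions, so they sum to zero for any offset, which is why the gradient of a constant field comes out as zero.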
294105d4fd142d51f0f2da1ee348f561efc10ffb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/operators/layer_norm_op.h" #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "caffe2/utils/math_utils.h" namespace caffe2 { namespace { template <typename T> using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; template <typename T> __global__ void ComputeStdDevAndFusedParamsCUDAKernel( const int N, const T epsilon, const T* mean, const T* var, T* stddev, T* scale, T* bias); template <> __global__ void ComputeStdDevAndFusedParamsCUDAKernel<float>( const int N, const float epsilon, const float* mean, const float* var, float* stddev, float* scale, float* bias) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 const float rstd = rsqrtf(__ldg(var + i) + epsilon); stddev[i] = rstd * (__ldg(var + i) + epsilon); scale[i] = rstd; bias[i] = -rstd * __ldg(mean + i); #else const float rstd = rsqrtf(var[i] + epsilon); stddev[i] = rstd * (var[i] + epsilon); scale[i] = rstd; bias[i] = -rstd * mean[i]; #endif } } template <typename T> __global__ void LayerNormForwardCUDAKernel( const int M, const int N, const T* X, const T* scale, const T* bias, T* Y) { for (int i = blockIdx.x; i < M; i += gridDim.x) { #if __CUDA_ARCH__ >= 350 const float scale_val = __ldg(scale + i); const float bias_val = __ldg(bias + i); #else const float scale_val = scale[i]; const float bias_val = bias[i]; #endif for (int j = threadIdx.x; j < N; j += blockDim.x) { const int index = i * N + j; #if __CUDA_ARCH__ >= 350 Y[index] = __ldg(X + index) * scale_val + bias_val; #else Y[index] = X[index] * scale_val + bias_val; #endif } } } template <typename T> __global__ void ComputeInternalGradientsCUDAKernel( const int M, const int N, const T* dY, const T* X, T* ds, T* db) { __shared__ typename BlockReduce<T>::TempStorage ds_storage; __shared__ typename BlockReduce<T>::TempStorage db_storage; for (int i = blockIdx.x; i < M; i += gridDim.x) { T ds_val = 0; T db_val = 0; for (int j = threadIdx.x; j < N; j += blockDim.x) { const int index = i * N + j; #if __CUDA_ARCH__ >= 350 ds_val += __ldg(dY + index) * __ldg(X + index); db_val += __ldg(dY + index); #else ds_val += dY[index] * X[index]; db_val += dY[index]; #endif } ds_val = BlockReduce<T>(ds_storage).Sum(ds_val); db_val = BlockReduce<T>(db_storage).Sum(db_val); if (threadIdx.x == 0) { ds[i] = ds_val; db[i] = db_val; } __syncthreads(); } } template <typename T> __global__ void ComputeFusedParamsCUDAKernel( const int M, const int N, const T* mean, const T* sig, const T* ds, const T* db, T* dY_scale, T* X_scale, T* bias) { const T scale = T(1) / static_cast<T>(N); CUDA_1D_KERNEL_LOOP(i, M) { #if __CUDA_ARCH__ >= 350 const T rsig = T(1) / __ldg(sig + i); const T X_scale_val = (__ldg(db + i) * __ldg(mean + i) - __ldg(ds + i)) * math::utils::Cube<T>(rsig) * scale; dY_scale[i] = rsig; X_scale[i] = X_scale_val; bias[i] = -X_scale_val * __ldg(mean + i) - __ldg(db + i) * rsig * scale; #else const T rsig = T(1) / sig[i]; const T X_scale_val = (db[i] * mean[i] - ds[i]) * math::utils::Cube<T>(rsig) * scale; dY_scale[i] = rsig; X_scale[i] = X_scale_val; bias[i] = -X_scale_val * mean[i] - db[i] * rsig * scale; #endif } } template <typename T> __global__ void LayerNormBackwardCUDAKenrel( const int M, const int N, const T* dY_scale, const T* dY, const T* X_scale, const T* X, const T* bias, T* dX) { for (int i = blockIdx.x; i < M; i += gridDim.x) { #if __CUDA_ARCH__ >= 350 const float dY_scale_val = __ldg(dY_scale + i); const 
float X_scale_val = __ldg(X_scale + i); const float bias_val = __ldg(bias + i); #else const float dY_scale_val = dY_scale[i]; const float X_scale_val = X_scale[i]; const float bias_val = bias[i]; #endif for (int j = threadIdx.x; j < N; j += blockDim.x) { const int index = i * N + j; #if __CUDA_ARCH__ >= 350 dX[index] = __ldg(dY + index) * dY_scale_val + __ldg(X + index) * X_scale_val + bias_val; #else dX[index] = dY[index] * dY_scale_val + X[index] * X_scale_val + bias_val; #endif } } } } // namespace template <> template <typename T> void LayerNormOp<CUDAContext>::ComputeStdDevAndFusedParams( const int N, const T* mean, const T* var, T* stddev, T* scale, T* bias, float epsilon, CUDAContext* context) { hipLaunchKernelGGL(( ComputeStdDevAndFusedParamsCUDAKernel<T>) , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), N, static_cast<T>(epsilon), mean, var, stddev, scale, bias); } template <> template <typename T> void LayerNormOp<CUDAContext>::LayerNormForward( const int M, const int N, const T* X, const T* scale, const T* bias, T* Y, CUDAContext* context) { hipLaunchKernelGGL(( LayerNormForwardCUDAKernel<T>) , dim3(::min(M, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), M, N, X, scale, bias, Y); } REGISTER_CUDA_OPERATOR(LayerNorm, LayerNormOp<CUDAContext>); template <> template <typename T> void LayerNormGradientOp<CUDAContext>::ComputeInternalGradients( const int M, const int N, const T* dY, const T* X, T* ds, T* db) { hipLaunchKernelGGL(( ComputeInternalGradientsCUDAKernel<T>) , dim3(::min(M, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), M, N, dY, X, ds, db); } template <> template <typename T> void LayerNormGradientOp<CUDAContext>::ComputeFusedParams( const int M, const int N, const T* mean, const T* sig, const T* ds, const T* db, T* dY_scale, T* X_scale, T* bias) { hipLaunchKernelGGL(( ComputeFusedParamsCUDAKernel<T>) , dim3(CAFFE_GET_BLOCKS(M)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), M, N, mean, sig, ds, db, dY_scale, X_scale, bias); } template <> template <typename T> void LayerNormGradientOp<CUDAContext>::LayerNormBackward( const int M, const int N, const T* dY_scale, const T* dY, const T* X_scale, const T* X, const T* bias, T* dX) { hipLaunchKernelGGL(( LayerNormBackwardCUDAKenrel<T>) , dim3(::min(M, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), M, N, dY_scale, dY, X_scale, X, bias, dX); } REGISTER_CUDA_OPERATOR(LayerNormGradient, LayerNormGradientOp<CUDAContext>); } // namespace caffe2
294105d4fd142d51f0f2da1ee348f561efc10ffb.cu
#include "caffe2/operators/layer_norm_op.h" #include <cub/cub.cuh> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" #include "caffe2/utils/math_utils.h" namespace caffe2 { namespace { template <typename T> using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>; template <typename T> __global__ void ComputeStdDevAndFusedParamsCUDAKernel( const int N, const T epsilon, const T* mean, const T* var, T* stddev, T* scale, T* bias); template <> __global__ void ComputeStdDevAndFusedParamsCUDAKernel<float>( const int N, const float epsilon, const float* mean, const float* var, float* stddev, float* scale, float* bias) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 const float rstd = rsqrtf(__ldg(var + i) + epsilon); stddev[i] = rstd * (__ldg(var + i) + epsilon); scale[i] = rstd; bias[i] = -rstd * __ldg(mean + i); #else const float rstd = rsqrtf(var[i] + epsilon); stddev[i] = rstd * (var[i] + epsilon); scale[i] = rstd; bias[i] = -rstd * mean[i]; #endif } } template <typename T> __global__ void LayerNormForwardCUDAKernel( const int M, const int N, const T* X, const T* scale, const T* bias, T* Y) { for (int i = blockIdx.x; i < M; i += gridDim.x) { #if __CUDA_ARCH__ >= 350 const float scale_val = __ldg(scale + i); const float bias_val = __ldg(bias + i); #else const float scale_val = scale[i]; const float bias_val = bias[i]; #endif for (int j = threadIdx.x; j < N; j += blockDim.x) { const int index = i * N + j; #if __CUDA_ARCH__ >= 350 Y[index] = __ldg(X + index) * scale_val + bias_val; #else Y[index] = X[index] * scale_val + bias_val; #endif } } } template <typename T> __global__ void ComputeInternalGradientsCUDAKernel( const int M, const int N, const T* dY, const T* X, T* ds, T* db) { __shared__ typename BlockReduce<T>::TempStorage ds_storage; __shared__ typename BlockReduce<T>::TempStorage db_storage; for (int i = blockIdx.x; i < M; i += gridDim.x) { T ds_val = 0; T db_val = 0; for (int j = threadIdx.x; j < N; j += blockDim.x) { const int index = i * N + j; #if __CUDA_ARCH__ >= 350 ds_val += __ldg(dY + index) * __ldg(X + index); db_val += __ldg(dY + index); #else ds_val += dY[index] * X[index]; db_val += dY[index]; #endif } ds_val = BlockReduce<T>(ds_storage).Sum(ds_val); db_val = BlockReduce<T>(db_storage).Sum(db_val); if (threadIdx.x == 0) { ds[i] = ds_val; db[i] = db_val; } __syncthreads(); } } template <typename T> __global__ void ComputeFusedParamsCUDAKernel( const int M, const int N, const T* mean, const T* sig, const T* ds, const T* db, T* dY_scale, T* X_scale, T* bias) { const T scale = T(1) / static_cast<T>(N); CUDA_1D_KERNEL_LOOP(i, M) { #if __CUDA_ARCH__ >= 350 const T rsig = T(1) / __ldg(sig + i); const T X_scale_val = (__ldg(db + i) * __ldg(mean + i) - __ldg(ds + i)) * math::utils::Cube<T>(rsig) * scale; dY_scale[i] = rsig; X_scale[i] = X_scale_val; bias[i] = -X_scale_val * __ldg(mean + i) - __ldg(db + i) * rsig * scale; #else const T rsig = T(1) / sig[i]; const T X_scale_val = (db[i] * mean[i] - ds[i]) * math::utils::Cube<T>(rsig) * scale; dY_scale[i] = rsig; X_scale[i] = X_scale_val; bias[i] = -X_scale_val * mean[i] - db[i] * rsig * scale; #endif } } template <typename T> __global__ void LayerNormBackwardCUDAKenrel( const int M, const int N, const T* dY_scale, const T* dY, const T* X_scale, const T* X, const T* bias, T* dX) { for (int i = blockIdx.x; i < M; i += gridDim.x) { #if __CUDA_ARCH__ >= 350 const float dY_scale_val = __ldg(dY_scale + i); const float X_scale_val = __ldg(X_scale + i); const float bias_val = __ldg(bias + i); #else const float 
dY_scale_val = dY_scale[i]; const float X_scale_val = X_scale[i]; const float bias_val = bias[i]; #endif for (int j = threadIdx.x; j < N; j += blockDim.x) { const int index = i * N + j; #if __CUDA_ARCH__ >= 350 dX[index] = __ldg(dY + index) * dY_scale_val + __ldg(X + index) * X_scale_val + bias_val; #else dX[index] = dY[index] * dY_scale_val + X[index] * X_scale_val + bias_val; #endif } } } } // namespace template <> template <typename T> void LayerNormOp<CUDAContext>::ComputeStdDevAndFusedParams( const int N, const T* mean, const T* var, T* stddev, T* scale, T* bias, float epsilon, CUDAContext* context) { ComputeStdDevAndFusedParamsCUDAKernel<T> <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( N, static_cast<T>(epsilon), mean, var, stddev, scale, bias); } template <> template <typename T> void LayerNormOp<CUDAContext>::LayerNormForward( const int M, const int N, const T* X, const T* scale, const T* bias, T* Y, CUDAContext* context) { LayerNormForwardCUDAKernel<T> <<<std::min(M, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(M, N, X, scale, bias, Y); } REGISTER_CUDA_OPERATOR(LayerNorm, LayerNormOp<CUDAContext>); template <> template <typename T> void LayerNormGradientOp<CUDAContext>::ComputeInternalGradients( const int M, const int N, const T* dY, const T* X, T* ds, T* db) { ComputeInternalGradientsCUDAKernel<T> <<<std::min(M, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(M, N, dY, X, ds, db); } template <> template <typename T> void LayerNormGradientOp<CUDAContext>::ComputeFusedParams( const int M, const int N, const T* mean, const T* sig, const T* ds, const T* db, T* dY_scale, T* X_scale, T* bias) { ComputeFusedParamsCUDAKernel<T> <<<CAFFE_GET_BLOCKS(M), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( M, N, mean, sig, ds, db, dY_scale, X_scale, bias); } template <> template <typename T> void LayerNormGradientOp<CUDAContext>::LayerNormBackward( const int M, const int N, const T* dY_scale, const T* dY, const T* X_scale, const T* X, const T* bias, T* dX) { LayerNormBackwardCUDAKenrel<T> <<<std::min(M, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(M, N, dY_scale, dY, X_scale, X, bias, dX); } REGISTER_CUDA_OPERATOR(LayerNormGradient, LayerNormGradientOp<CUDAContext>); } // namespace caffe2
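A minimal CPU sketch of the forward-path algebra (an assumed example, not part of the caffe2 sources): ComputeStdDevAndFusedParamsCUDAKernel folds the normalization into a per-row scale and bias, so the elementwise LayerNormForwardCUDAKernel only has to compute Y = X*scale + bias. The snippet below checks that this equals the textbook form (X - mean)/sqrt(var + epsilon).

#include <cstdio>
#include <cmath>

int main() {
    const float mean = 2.0f, var = 4.0f, epsilon = 1e-5f;

    // Same algebra as the kernel: rstd = 1/sqrt(var+eps),
    // stddev = rstd*(var+eps) = sqrt(var+eps), scale = rstd, bias = -rstd*mean.
    const float rstd   = 1.0f / std::sqrt(var + epsilon);
    const float stddev = rstd * (var + epsilon);
    const float scale  = rstd;
    const float bias   = -rstd * mean;

    const float X = 5.0f;
    const float fused  = X * scale + bias;              // what LayerNormForwardCUDAKernel computes
    const float direct = (X - mean) / stddev;           // textbook layer norm
    printf("fused=%.6f direct=%.6f\n", fused, direct);  // both ~1.5
    return 0;
}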
a3de264f7ee4100367bfeb20a800d348d29e2e0b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //pass //--gridDim=64 --blockDim=256 --warp-sync=32 template <class T, unsigned int blockSize> __global__ void reduce5(T *g_idata, T *g_odata, unsigned int n); template __global__ void reduce5<int,256>(int *g_idata, int *g_odata, unsigned int n); #include "common.h" template <class T, unsigned int blockSize> __global__ void reduce5(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x; T mySum = (i < n) ? g_idata[i] : 0; if (i + blockSize < n) mySum += g_idata[i+blockSize]; sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile T *smem = sdata; if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; }
a3de264f7ee4100367bfeb20a800d348d29e2e0b.cu
//pass //--gridDim=64 --blockDim=256 --warp-sync=32 template <class T, unsigned int blockSize> __global__ void reduce5(T *g_idata, T *g_odata, unsigned int n); template __global__ void reduce5<int,256>(int *g_idata, int *g_odata, unsigned int n); #include "common.h" template <class T, unsigned int blockSize> __global__ void reduce5(T *g_idata, T *g_odata, unsigned int n) { T *sdata = SharedMemory<T>(); // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x; T mySum = (i < n) ? g_idata[i] : 0; if (i + blockSize < n) mySum += g_idata[i+blockSize]; sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile T *smem = sdata; if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; }
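A host-side usage sketch for the kernel above (assumed, not taken from the file): it presumes the sketch is compiled and device-linked together with the translation unit above, which supplies common.h's SharedMemory<T> and the explicit reduce5<int,256> instantiation. Each block reduces 2*blockSize input elements and needs blockSize*sizeof(T) bytes of dynamic shared memory; the per-block partial sums are finished on the host.

#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

// Forward declaration matching the explicit instantiation in the file above.
template <class T, unsigned int blockSize>
__global__ void reduce5(T *g_idata, T *g_odata, unsigned int n);

int main() {
    const unsigned int n = 1 << 15;                                           // 32768 elements
    const unsigned int blockSize = 256;
    const unsigned int numBlocks = (n + blockSize * 2 - 1) / (blockSize * 2); // 64 blocks

    std::vector<int> h_in(n, 1);              // all ones, so the sum should equal n
    int *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in, n * sizeof(int));
    cudaMalloc(&d_out, numBlocks * sizeof(int));
    cudaMemcpy(d_in, h_in.data(), n * sizeof(int), cudaMemcpyHostToDevice);

    // Dynamic shared memory: one T per thread for the in-block tree reduction.
    reduce5<int, 256><<<numBlocks, blockSize, blockSize * sizeof(int)>>>(d_in, d_out, n);

    std::vector<int> h_out(numBlocks);
    cudaMemcpy(h_out.data(), d_out, numBlocks * sizeof(int), cudaMemcpyDeviceToHost);

    long long total = 0;
    for (unsigned int b = 0; b < numBlocks; ++b) total += h_out[b];  // final stage on the host
    printf("sum = %lld (expected %u)\n", total, n);

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}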
2b88e49d4d121742ce86c4387eabece0155e56ea.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2020 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #define EIGEN_USE_GPU #include "SparseConvOpKernel.h" #include "open3d/ml/Helper.h" #include "open3d/ml/impl/sparse_conv/SparseConv.cuh" using namespace open3d::ml; using namespace open3d::ml::impl; using namespace tensorflow; template <class TFeat, class TOut, class TIndex, class TKernelIndex> class SparseConvOpKernelCUDA : public SparseConvOpKernel<TIndex> { public: explicit SparseConvOpKernelCUDA(OpKernelConstruction* construction) : SparseConvOpKernel<TIndex>(construction) { texture_alignment = GetCUDACurrentDeviceTextureAlignment(); } void Kernel(tensorflow::OpKernelContext* context, const tensorflow::Tensor& filter, const tensorflow::Tensor& inp_features, const tensorflow::Tensor& inp_importance, const tensorflow::Tensor& neighbors_index, const tensorflow::Tensor& neighbors_kernel_index, const tensorflow::Tensor& neighbors_importance, const tensorflow::Tensor& neighbors_row_splits, const std::vector<int>& filter_dims, const bool point_importances, const bool has_neighbors_importances, tensorflow::Tensor& out_features) { auto device = context->eigen_gpu_device(); void* temp_ptr = nullptr; size_t temp_size = 0; size_t max_temp_size = 0; // determine temp_size SparseConvComputeFeaturesCUDA<TFeat, TOut, TIndex, TKernelIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, out_features.flat<TOut>().data(), filter_dims, filter.flat<TFeat>().data(), neighbors_row_splits.dim_size(0) - 1, inp_features.dim_size(0), inp_features.flat<TFeat>().data(), point_importances ? inp_importance.flat<TFeat>().data() : nullptr, neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), (TKernelIndex*)neighbors_kernel_index.flat<TKernelIndex>() .data(), has_neighbors_importances ? 
neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), this->normalize); temp_size = ::max(::min(size_t(this->max_temp_mem_MB) * 1024 * 1024, max_temp_size), temp_size); Tensor temp_tensor; TensorShape temp_shape({ssize_t(temp_size)}); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<uint8_t>::v(), temp_shape, &temp_tensor)); temp_ptr = temp_tensor.flat<uint8_t>().data(); // actually run the operation SparseConvComputeFeaturesCUDA<TFeat, TOut, TIndex, TKernelIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, out_features.flat<TOut>().data(), filter_dims, filter.flat<TFeat>().data(), neighbors_row_splits.dim_size(0) - 1, inp_features.dim_size(0), inp_features.flat<TFeat>().data(), point_importances ? inp_importance.flat<TFeat>().data() : nullptr, neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), (TKernelIndex*)neighbors_kernel_index.flat<TKernelIndex>() .data(), has_neighbors_importances ? neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), this->normalize); } private: int texture_alignment; }; #define REG_KB(feattype, outtype, indextype, kernelindextype) \ REGISTER_KERNEL_BUILDER( \ Name("Open3DSparseConv") \ .Device(DEVICE_GPU) \ .TypeConstraint<feattype>("TFeat") \ .TypeConstraint<outtype>("output_type") \ .TypeConstraint<indextype>("TIndex") \ .TypeConstraint<kernelindextype>("TKernelIndex"), \ SparseConvOpKernelCUDA<feattype, outtype, indextype, \ kernelindextype>); REG_KB(float, float, int32, int16) REG_KB(float, float, int32, uint8_t) #undef REG_KB
2b88e49d4d121742ce86c4387eabece0155e56ea.cu
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2020 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. // ---------------------------------------------------------------------------- #define EIGEN_USE_GPU #include "SparseConvOpKernel.h" #include "open3d/ml/Helper.h" #include "open3d/ml/impl/sparse_conv/SparseConv.cuh" using namespace open3d::ml; using namespace open3d::ml::impl; using namespace tensorflow; template <class TFeat, class TOut, class TIndex, class TKernelIndex> class SparseConvOpKernelCUDA : public SparseConvOpKernel<TIndex> { public: explicit SparseConvOpKernelCUDA(OpKernelConstruction* construction) : SparseConvOpKernel<TIndex>(construction) { texture_alignment = GetCUDACurrentDeviceTextureAlignment(); } void Kernel(tensorflow::OpKernelContext* context, const tensorflow::Tensor& filter, const tensorflow::Tensor& inp_features, const tensorflow::Tensor& inp_importance, const tensorflow::Tensor& neighbors_index, const tensorflow::Tensor& neighbors_kernel_index, const tensorflow::Tensor& neighbors_importance, const tensorflow::Tensor& neighbors_row_splits, const std::vector<int>& filter_dims, const bool point_importances, const bool has_neighbors_importances, tensorflow::Tensor& out_features) { auto device = context->eigen_gpu_device(); void* temp_ptr = nullptr; size_t temp_size = 0; size_t max_temp_size = 0; // determine temp_size SparseConvComputeFeaturesCUDA<TFeat, TOut, TIndex, TKernelIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, out_features.flat<TOut>().data(), filter_dims, filter.flat<TFeat>().data(), neighbors_row_splits.dim_size(0) - 1, inp_features.dim_size(0), inp_features.flat<TFeat>().data(), point_importances ? inp_importance.flat<TFeat>().data() : nullptr, neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), (TKernelIndex*)neighbors_kernel_index.flat<TKernelIndex>() .data(), has_neighbors_importances ? 
neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), this->normalize); temp_size = std::max(std::min(size_t(this->max_temp_mem_MB) * 1024 * 1024, max_temp_size), temp_size); Tensor temp_tensor; TensorShape temp_shape({ssize_t(temp_size)}); OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<uint8_t>::v(), temp_shape, &temp_tensor)); temp_ptr = temp_tensor.flat<uint8_t>().data(); // actually run the operation SparseConvComputeFeaturesCUDA<TFeat, TOut, TIndex, TKernelIndex>( device.stream(), temp_ptr, temp_size, max_temp_size, texture_alignment, out_features.flat<TOut>().data(), filter_dims, filter.flat<TFeat>().data(), neighbors_row_splits.dim_size(0) - 1, inp_features.dim_size(0), inp_features.flat<TFeat>().data(), point_importances ? inp_importance.flat<TFeat>().data() : nullptr, neighbors_index.shape().dim_size(0), (TIndex*)neighbors_index.flat<TIndex>().data(), (TKernelIndex*)neighbors_kernel_index.flat<TKernelIndex>() .data(), has_neighbors_importances ? neighbors_importance.flat<TFeat>().data() : nullptr, (int64_t*)neighbors_row_splits.flat<int64>().data(), this->normalize); } private: int texture_alignment; }; #define REG_KB(feattype, outtype, indextype, kernelindextype) \ REGISTER_KERNEL_BUILDER( \ Name("Open3DSparseConv") \ .Device(DEVICE_GPU) \ .TypeConstraint<feattype>("TFeat") \ .TypeConstraint<outtype>("output_type") \ .TypeConstraint<indextype>("TIndex") \ .TypeConstraint<kernelindextype>("TKernelIndex"), \ SparseConvOpKernelCUDA<feattype, outtype, indextype, \ kernelindextype>); REG_KB(float, float, int32, int16) REG_KB(float, float, int32, uint8_t) #undef REG_KB
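The interesting control flow in this op is the two-pass scratch-memory negotiation: the compute function is first called with a null temp pointer purely to report its required and maximum useful temp sizes, the op clamps that against max_temp_mem_MB, allocates a temp tensor, and then calls again to do the real work. Below is a generic CPU-only sketch of that pattern; ComputeWithTemp and every other name in it are hypothetical illustrations, not the Open3D or SparseConvComputeFeaturesCUDA API.

#include <cstdio>
#include <cstdlib>
#include <algorithm>

// Hypothetical worker: when temp == nullptr it only fills temp_size/max_temp_size.
void ComputeWithTemp(void* temp, size_t& temp_size, size_t& max_temp_size,
                     const float* in, float* out, int n) {
    const size_t needed = static_cast<size_t>(n) * sizeof(float);
    if (temp == nullptr) {            // pass 1: size query only
        temp_size = needed;
        max_temp_size = 2 * needed;   // could use more scratch if the caller allows it
        return;
    }
    float* scratch = static_cast<float*>(temp);   // pass 2: real work using the scratch buffer
    for (int i = 0; i < n; ++i) scratch[i] = in[i] * 2.0f;
    for (int i = 0; i < n; ++i) out[i] = scratch[i];
}

int main() {
    const int n = 8;
    float in[n], out[n];
    for (int i = 0; i < n; ++i) in[i] = float(i);

    size_t temp_size = 0, max_temp_size = 0;
    ComputeWithTemp(nullptr, temp_size, max_temp_size, in, out, n);   // query sizes

    const size_t budget = 64 * 1024 * 1024;                           // plays the role of max_temp_mem_MB
    temp_size = std::max(std::min(budget, max_temp_size), temp_size); // clamp, but never below the minimum
    void* temp = std::malloc(temp_size);

    ComputeWithTemp(temp, temp_size, max_temp_size, in, out, n);      // run for real
    printf("out[3] = %.1f\n", out[3]);                                // 6.0
    std::free(temp);
    return 0;
}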
a0e9c3eac87c1eedc1c613f46d3fd0a79bb6c4e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "backend/kernel_compiler/gpu/cuda_impl/adam_impl.cuh" template <typename T> __device__ __forceinline__ T SqrtFunc(T input) { return sqrt(input); } template <> __device__ __forceinline__ half SqrtFunc(half input) { return hsqrt(input); } template <typename T> __global__ void ApplyAdamKernel(const size_t size, const T *gradient, const T *beta1_power, const T *beta2_power, const T *learning_rate, const T *beta1, const T *beta2, const T *epsilon, T *variable, T *m, T *v) { const T one = static_cast<T>(1.0); const T new_learning_rate = learning_rate[0] * SqrtFunc(one - beta2_power[0]) / (one - beta1_power[0]); for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { m[i] += (gradient[i] - m[i]) * (one - beta1[0]); v[i] += (gradient[i] * gradient[i] - v[i]) * (one - beta2[0]); variable[i] -= new_learning_rate * m[i] / (SqrtFunc(v[i]) + epsilon[0]); } } template <typename T> __global__ void AdamWeightDecayKernel(const size_t size, const T *gradient, const float *learning_rate, const float *beta1, const float *beta2, const float *epsilon, const float *decay, T *variable, T *m, T *v) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { T next_m = beta1[0] * m[i] + (1 - beta1[0]) * gradient[i]; T next_v = beta2[0] * v[i] + (1 - beta2[0]) * gradient[i] * gradient[i]; T update = next_m / (sqrt(next_v) + epsilon[0]); update += decay[0] * variable[i]; variable[i] -= learning_rate[0] * update; m[i] = next_m; v[i] = next_v; } } template <> __global__ void AdamWeightDecayKernel(const size_t size, const half *gradient, const float *learning_rate, const float *beta1, const float *beta2, const float *epsilon, const float *decay, half *variable, half *m, half *v) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { half next_m = __float2half(beta1[0]) * m[i] + __float2half(1 - beta1[0]) * gradient[i]; half next_v = __float2half(beta2[0]) * v[i] + __float2half(1 - beta2[0]) * gradient[i] * gradient[i]; half update = next_m / (hsqrt(next_v) + __float2half(epsilon[0])); update += __float2half(decay[0]) * variable[i]; variable[i] -= __float2half(learning_rate[0]) * update; m[i] = next_m; v[i] = next_v; } } template <typename T> void ApplyAdam(const size_t size, const T *gradient, const T *beta1_power, const T *beta2_power, const T *learning_rate, const T *beta1, const T *beta2, const T *epsilon, T *variable, T *m, T *v, hipStream_t cuda_stream) { hipLaunchKernelGGL(( ApplyAdamKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, gradient, beta1_power, beta2_power, learning_rate, beta1, beta2, epsilon, variable, m, v); } template <typename T> void AdamWeightDecayOp(const size_t size, const T *gradient, const float *learning_rate, const float *beta1, const float *beta2, const float *epsilon, 
const float *decay, T *variable, T *m, T *v, hipStream_t cuda_stream) { hipLaunchKernelGGL(( AdamWeightDecayKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, gradient, learning_rate, beta1, beta2, epsilon, decay, variable, m, v); } template void ApplyAdam<float>(const size_t size, const float *gradient, const float *beta1_power, const float *beta2_power, const float *learning_rate, const float *beta1, const float *beta2, const float *epsilon, float *variable, float *m, float *v, hipStream_t cuda_stream); template void ApplyAdam<half>(const size_t size, const half *gradient, const half *beta1_power, const half *beta2_power, const half *learning_rate, const half *beta1, const half *beta2, const half *epsilon, half *variable, half *m, half *v, hipStream_t cuda_stream); template void AdamWeightDecayOp<float>(const size_t size, const float *gradient, const float *learning_rate, const float *beta1, const float *beta2, const float *epsilon, const float *decay, float *variable, float *m, float *v, hipStream_t cuda_stream); template void AdamWeightDecayOp<half>(const size_t size, const half *gradient, const float *learning_rate, const float *beta1, const float *beta2, const float *epsilon, const float *decay, half *variable, half *m, half *v, hipStream_t cuda_stream);
a0e9c3eac87c1eedc1c613f46d3fd0a79bb6c4e9.cu
/** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "backend/kernel_compiler/gpu/cuda_impl/adam_impl.cuh" template <typename T> __device__ __forceinline__ T SqrtFunc(T input) { return sqrt(input); } template <> __device__ __forceinline__ half SqrtFunc(half input) { return hsqrt(input); } template <typename T> __global__ void ApplyAdamKernel(const size_t size, const T *gradient, const T *beta1_power, const T *beta2_power, const T *learning_rate, const T *beta1, const T *beta2, const T *epsilon, T *variable, T *m, T *v) { const T one = static_cast<T>(1.0); const T new_learning_rate = learning_rate[0] * SqrtFunc(one - beta2_power[0]) / (one - beta1_power[0]); for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { m[i] += (gradient[i] - m[i]) * (one - beta1[0]); v[i] += (gradient[i] * gradient[i] - v[i]) * (one - beta2[0]); variable[i] -= new_learning_rate * m[i] / (SqrtFunc(v[i]) + epsilon[0]); } } template <typename T> __global__ void AdamWeightDecayKernel(const size_t size, const T *gradient, const float *learning_rate, const float *beta1, const float *beta2, const float *epsilon, const float *decay, T *variable, T *m, T *v) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { T next_m = beta1[0] * m[i] + (1 - beta1[0]) * gradient[i]; T next_v = beta2[0] * v[i] + (1 - beta2[0]) * gradient[i] * gradient[i]; T update = next_m / (sqrt(next_v) + epsilon[0]); update += decay[0] * variable[i]; variable[i] -= learning_rate[0] * update; m[i] = next_m; v[i] = next_v; } } template <> __global__ void AdamWeightDecayKernel(const size_t size, const half *gradient, const float *learning_rate, const float *beta1, const float *beta2, const float *epsilon, const float *decay, half *variable, half *m, half *v) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += gridDim.x * blockDim.x) { half next_m = __float2half(beta1[0]) * m[i] + __float2half(1 - beta1[0]) * gradient[i]; half next_v = __float2half(beta2[0]) * v[i] + __float2half(1 - beta2[0]) * gradient[i] * gradient[i]; half update = next_m / (hsqrt(next_v) + __float2half(epsilon[0])); update += __float2half(decay[0]) * variable[i]; variable[i] -= __float2half(learning_rate[0]) * update; m[i] = next_m; v[i] = next_v; } } template <typename T> void ApplyAdam(const size_t size, const T *gradient, const T *beta1_power, const T *beta2_power, const T *learning_rate, const T *beta1, const T *beta2, const T *epsilon, T *variable, T *m, T *v, cudaStream_t cuda_stream) { ApplyAdamKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>( size, gradient, beta1_power, beta2_power, learning_rate, beta1, beta2, epsilon, variable, m, v); } template <typename T> void AdamWeightDecayOp(const size_t size, const T *gradient, const float *learning_rate, const float *beta1, const float *beta2, const float *epsilon, const float *decay, T *variable, T *m, T *v, cudaStream_t cuda_stream) { AdamWeightDecayKernel<<<GET_BLOCKS(size), 
GET_THREADS, 0, cuda_stream>>>(size, gradient, learning_rate, beta1, beta2, epsilon, decay, variable, m, v); } template void ApplyAdam<float>(const size_t size, const float *gradient, const float *beta1_power, const float *beta2_power, const float *learning_rate, const float *beta1, const float *beta2, const float *epsilon, float *variable, float *m, float *v, cudaStream_t cuda_stream); template void ApplyAdam<half>(const size_t size, const half *gradient, const half *beta1_power, const half *beta2_power, const half *learning_rate, const half *beta1, const half *beta2, const half *epsilon, half *variable, half *m, half *v, cudaStream_t cuda_stream); template void AdamWeightDecayOp<float>(const size_t size, const float *gradient, const float *learning_rate, const float *beta1, const float *beta2, const float *epsilon, const float *decay, float *variable, float *m, float *v, cudaStream_t cuda_stream); template void AdamWeightDecayOp<half>(const size_t size, const half *gradient, const float *learning_rate, const float *beta1, const float *beta2, const float *epsilon, const float *decay, half *variable, half *m, half *v, cudaStream_t cuda_stream);
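A scalar CPU reference of one optimizer step as ApplyAdamKernel performs it (an assumed example, not part of the MindSpore sources): the bias correction is folded into new_lr = lr*sqrt(1-beta2^t)/(1-beta1^t), and the in-place forms m += (g-m)*(1-beta1) and v += (g*g-v)*(1-beta2) are algebraically the usual exponential moving averages beta1*m + (1-beta1)*g and beta2*v + (1-beta2)*g*g.

#include <cstdio>
#include <cmath>

int main() {
    // Scalar "tensors" for a single parameter after step t = 1.
    float lr = 0.001f, beta1 = 0.9f, beta2 = 0.999f, eps = 1e-8f;
    float beta1_power = beta1, beta2_power = beta2;   // beta^t for t = 1
    float var = 0.5f, m = 0.0f, v = 0.0f, grad = 0.2f;

    // Bias-corrected learning rate, exactly as computed once per kernel launch.
    const float new_lr = lr * std::sqrt(1.0f - beta2_power) / (1.0f - beta1_power);

    // Same in-place updates as the kernel body.
    m += (grad - m) * (1.0f - beta1);                 // first-moment EMA
    v += (grad * grad - v) * (1.0f - beta2);          // second-moment EMA
    var -= new_lr * m / (std::sqrt(v) + eps);         // parameter update

    printf("m=%.6f v=%.8f var=%.6f\n", m, v, var);
    return 0;
}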
b89179f9bba8cb9656b3994849acc8c344539137.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* CSCI 563 Programming Assignment 3 Clayton Kramp */ #include <stdio.h> #include <math.h> #include <assert.h> #define THREADS_PER_LINE 16 using namespace std; __global__ void loadBalancedSpMV(float* t, float* b, int* ptr, float* data, int* ind, int n) { int myi = blockIdx.x * blockDim.x + threadIdx.x; int lb = ptr[myi / THREADS_PER_LINE]; int ub = ptr[(myi / THREADS_PER_LINE) + 1]; extern __shared__ float partialSum[]; partialSum[threadIdx.x] = 0; partialSum[threadIdx.x + THREADS_PER_LINE] = 0; for (int j = lb + threadIdx.x; j < ub; j += THREADS_PER_LINE) { int index = ind[j]; partialSum[threadIdx.x] += data[j] * b[index]; } for (unsigned int stride = THREADS_PER_LINE; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) partialSum[threadIdx.x] += partialSum[threadIdx.x+stride]; } if (threadIdx.x == 0) t[myi / THREADS_PER_LINE] = partialSum[threadIdx.x]; } main (int argc, char **argv) { float time; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); FILE *fp; char line[1024]; int *ptr, *indices; float *data, *b, *t; int i,j; int n; // number of nonzero elements in data int nr; // number of rows in matrix int nc; // number of columns in matrix // Open input file and read to end of comments if (argc !=2) abort(); if ((fp = fopen(argv[1], "r")) == NULL) { abort(); } fgets(line, 128, fp); while (line[0] == '%') { fgets(line, 128, fp); } // Read number of rows (nr), number of columns (nc) and // number of elements and allocate memory for ptr, indices, data, b and t. sscanf(line,"%d %d %d\n", &nr, &nc, &n); ptr = (int *) malloc ((nr+1)*sizeof(int)); indices = (int *) malloc(n*sizeof(int)); data = (float *) malloc(n*sizeof(float)); b = (float *) malloc(nc*sizeof(float)); t = (float *) malloc(nr*sizeof(float)); // Read data in coordinate format and initialize sparse matrix int lastr=0; for (i=0; i<n; i++) { int r; fscanf(fp,"%d %d %f\n", &r, &(indices[i]), &(data[i])); indices[i]--; // start numbering at 0 if (r!=lastr) { ptr[r-1] = i; lastr = r; } } ptr[nr] = n; // initialize t to 0 and b with random data for (i=0; i<nr; i++) { t[i] = 0.0; } for (i=0; i<nc; i++) { b[i] = (float) rand()/1111111111; } // TODO: Compute result on GPU and compare output float* deviceT; hipMalloc(&deviceT, nr * sizeof(float)); hipMemcpy(deviceT, t, nr * sizeof(float), hipMemcpyHostToDevice); float* deviceB; hipMalloc(&deviceB, nc * sizeof(float)); hipMemcpy(deviceB, b, nc * sizeof(float), hipMemcpyHostToDevice); int* devicePtr; hipMalloc(&devicePtr, (nr+1) * sizeof(int)); hipMemcpy(devicePtr, ptr, (nr+1) * sizeof(int), hipMemcpyHostToDevice); float* deviceData; hipMalloc(&deviceData, n * sizeof(float)); hipMemcpy(deviceData, data, n * sizeof(float), hipMemcpyHostToDevice); int* deviceIndices; hipMalloc(&deviceIndices, n * sizeof(int)); hipMemcpy(deviceIndices, indices, n * sizeof(int), hipMemcpyHostToDevice); dim3 threadsPerBlock(16,1,1); dim3 numBlocks(nr, 1, 1); hipEventRecord(start, 0); hipLaunchKernelGGL(( loadBalancedSpMV), dim3(numBlocks), dim3(threadsPerBlock), nc+THREADS_PER_LINE, 0, deviceT, deviceB, devicePtr, deviceData, deviceIndices, n); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); printf("Time to generate: %3.5f ms \n", time); float* newT = (float *) malloc(nr*sizeof(float)); hipMemcpy(newT, deviceT, nr*sizeof(float), hipMemcpyDeviceToHost); // MAIN COMPUTATION, SEQUENTIAL VERSION for (i=0; i<nr; i++) { for (j = ptr[i]; j<ptr[i+1]; 
j++) { t[i] = t[i] + data[j] * b[indices[j]]; } } for (int i = 0; i < nr; i++) { assert(abs(newT[i] - t[i]) < 0.0001); } hipFree(deviceT); hipFree(deviceIndices); hipFree(devicePtr); hipFree(deviceData); hipFree(deviceB); free(newT); free(indices); free(ptr); free(data); free(t); free(b); printf("Completed and output matches sequential version\n"); }
b89179f9bba8cb9656b3994849acc8c344539137.cu
/* CSCI 563 Programming Assignment 3 Clayton Kramp */ #include <stdio.h> #include <math.h> #include <assert.h> #define THREADS_PER_LINE 16 using namespace std; __global__ void loadBalancedSpMV(float* t, float* b, int* ptr, float* data, int* ind, int n) { int myi = blockIdx.x * blockDim.x + threadIdx.x; int lb = ptr[myi / THREADS_PER_LINE]; int ub = ptr[(myi / THREADS_PER_LINE) + 1]; extern __shared__ float partialSum[]; partialSum[threadIdx.x] = 0; partialSum[threadIdx.x + THREADS_PER_LINE] = 0; for (int j = lb + threadIdx.x; j < ub; j += THREADS_PER_LINE) { int index = ind[j]; partialSum[threadIdx.x] += data[j] * b[index]; } for (unsigned int stride = THREADS_PER_LINE; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) partialSum[threadIdx.x] += partialSum[threadIdx.x+stride]; } if (threadIdx.x == 0) t[myi / THREADS_PER_LINE] = partialSum[threadIdx.x]; } main (int argc, char **argv) { float time; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); FILE *fp; char line[1024]; int *ptr, *indices; float *data, *b, *t; int i,j; int n; // number of nonzero elements in data int nr; // number of rows in matrix int nc; // number of columns in matrix // Open input file and read to end of comments if (argc !=2) abort(); if ((fp = fopen(argv[1], "r")) == NULL) { abort(); } fgets(line, 128, fp); while (line[0] == '%') { fgets(line, 128, fp); } // Read number of rows (nr), number of columns (nc) and // number of elements and allocate memory for ptr, indices, data, b and t. sscanf(line,"%d %d %d\n", &nr, &nc, &n); ptr = (int *) malloc ((nr+1)*sizeof(int)); indices = (int *) malloc(n*sizeof(int)); data = (float *) malloc(n*sizeof(float)); b = (float *) malloc(nc*sizeof(float)); t = (float *) malloc(nr*sizeof(float)); // Read data in coordinate format and initialize sparse matrix int lastr=0; for (i=0; i<n; i++) { int r; fscanf(fp,"%d %d %f\n", &r, &(indices[i]), &(data[i])); indices[i]--; // start numbering at 0 if (r!=lastr) { ptr[r-1] = i; lastr = r; } } ptr[nr] = n; // initialize t to 0 and b with random data for (i=0; i<nr; i++) { t[i] = 0.0; } for (i=0; i<nc; i++) { b[i] = (float) rand()/1111111111; } // TODO: Compute result on GPU and compare output float* deviceT; cudaMalloc(&deviceT, nr * sizeof(float)); cudaMemcpy(deviceT, t, nr * sizeof(float), cudaMemcpyHostToDevice); float* deviceB; cudaMalloc(&deviceB, nc * sizeof(float)); cudaMemcpy(deviceB, b, nc * sizeof(float), cudaMemcpyHostToDevice); int* devicePtr; cudaMalloc(&devicePtr, (nr+1) * sizeof(int)); cudaMemcpy(devicePtr, ptr, (nr+1) * sizeof(int), cudaMemcpyHostToDevice); float* deviceData; cudaMalloc(&deviceData, n * sizeof(float)); cudaMemcpy(deviceData, data, n * sizeof(float), cudaMemcpyHostToDevice); int* deviceIndices; cudaMalloc(&deviceIndices, n * sizeof(int)); cudaMemcpy(deviceIndices, indices, n * sizeof(int), cudaMemcpyHostToDevice); dim3 threadsPerBlock(16,1,1); dim3 numBlocks(nr, 1, 1); cudaEventRecord(start, 0); loadBalancedSpMV<<<numBlocks, threadsPerBlock, nc+THREADS_PER_LINE>>>(deviceT, deviceB, devicePtr, deviceData, deviceIndices, n); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("Time to generate: %3.5f ms \n", time); float* newT = (float *) malloc(nr*sizeof(float)); cudaMemcpy(newT, deviceT, nr*sizeof(float), cudaMemcpyDeviceToHost); // MAIN COMPUTATION, SEQUENTIAL VERSION for (i=0; i<nr; i++) { for (j = ptr[i]; j<ptr[i+1]; j++) { t[i] = t[i] + data[j] * b[indices[j]]; } } for (int i = 0; i < nr; i++) { assert(abs(newT[i] - 
t[i]) < 0.0001); } cudaFree(deviceT); cudaFree(deviceIndices); cudaFree(devicePtr); cudaFree(deviceData); cudaFree(deviceB); free(newT); free(indices); free(ptr); free(data); free(t); free(b); printf("Completed and output matches sequential version\n"); }
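loadBalancedSpMV assigns THREADS_PER_LINE threads to each CSR row, accumulates per-thread partial sums in dynamic shared memory, and tree-reduces them; the kernel indexes up to 2*THREADS_PER_LINE floats per block. The launches above request nc+THREADS_PER_LINE bytes of dynamic shared memory; a sketch of a sizing derived directly from what the kernel touches is shown below. It is a suggestion only and assumes the pointers and launch dimensions declared in main.

// Hypothetical alternative launch: size dynamic shared memory (in bytes) from the
// 2*THREADS_PER_LINE floats that loadBalancedSpMV actually indexes per block.
size_t sharedBytes = 2 * THREADS_PER_LINE * sizeof(float);
loadBalancedSpMV<<<numBlocks, threadsPerBlock, sharedBytes>>>(
    deviceT, deviceB, devicePtr, deviceData, deviceIndices, n);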
92fcacc2ca8bfff570af7667edbd784df6655465.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <math.h> #include <stdio.h> #define CHECK \ { \ const hipError_t i = hipGetLastError();\ if(i) \ printf("(%s:%i) %s\n", __FILE__, __LINE__-1, hipGetErrorString(i));\ } #define IDX_PATT(a, b) \ const int a = blockDim.x * blockIdx.x + threadIdx.x; \ const int b = blockDim.y * blockIdx.y + threadIdx.y; // FOOBAR = (2.0 * alpha * temperature) / (dt * gamma) template <unsigned int twiddle> __global__ void do_thermal32( const float* d_rng6, float FOOBAR, float* d_scale, float* d_hx, float* d_hy, float* d_hz, float* d_ms, const int nx, const int ny, const int offset) { IDX_PATT(x, y); if(x >= nx || y >= ny) return; const int idx = x + y*nx + offset; const float ms = d_ms[idx]; if(ms != 0) { const float stddev = sqrt((FOOBAR * d_scale[idx]) / ms); d_hx[idx] = stddev * d_rng6[idx*6+0+twiddle*3]; d_hy[idx] = stddev * d_rng6[idx*6+1+twiddle*3]; d_hz[idx] = stddev * d_rng6[idx*6+2+twiddle*3]; } else { d_hx[idx] = 0; d_hy[idx] = 0; d_hz[idx] = 0; } } void cuda_thermal32(const float* d_rng6, const int twiddle, float alpha, float gamma, float dt, float temperature, float* d_hx, float* d_hy, float* d_hz, float* d_ms, float* d_scale, const int nx, const int ny, const int nz) { const float FOOBAR = (2.0 * alpha * temperature) / (dt * gamma); const int _blocksx = nx / 32 + 1; const int _blocksy = ny / 32 + 1; dim3 blocks(_blocksx, _blocksy); dim3 threads(32,32); if(twiddle == 0) { for(int i=0; i<nz; i++) { hipLaunchKernelGGL(( do_thermal32<0>), dim3(blocks), dim3(threads), 0, 0, d_rng6, FOOBAR, d_scale, d_hx, d_hy, d_hz, d_ms, nx, ny, nx*ny*i); CHECK } } else { for(int i=0; i<nz; i++) { hipLaunchKernelGGL(( do_thermal32<1>), dim3(blocks), dim3(threads), 0, 0, d_rng6, FOOBAR, d_scale, d_hx, d_hy, d_hz, d_ms, nx, ny, nx*ny*i); CHECK } } }
92fcacc2ca8bfff570af7667edbd784df6655465.cu
#include <cuda.h> #include <cuda_runtime.h> #include <math.h> #include <stdio.h> #define CHECK \ { \ const cudaError_t i = cudaGetLastError();\ if(i) \ printf("(%s:%i) %s\n", __FILE__, __LINE__-1, cudaGetErrorString(i));\ } #define IDX_PATT(a, b) \ const int a = blockDim.x * blockIdx.x + threadIdx.x; \ const int b = blockDim.y * blockIdx.y + threadIdx.y; // FOOBAR = (2.0 * alpha * temperature) / (dt * gamma) template <unsigned int twiddle> __global__ void do_thermal32( const float* d_rng6, float FOOBAR, float* d_scale, float* d_hx, float* d_hy, float* d_hz, float* d_ms, const int nx, const int ny, const int offset) { IDX_PATT(x, y); if(x >= nx || y >= ny) return; const int idx = x + y*nx + offset; const float ms = d_ms[idx]; if(ms != 0) { const float stddev = sqrt((FOOBAR * d_scale[idx]) / ms); d_hx[idx] = stddev * d_rng6[idx*6+0+twiddle*3]; d_hy[idx] = stddev * d_rng6[idx*6+1+twiddle*3]; d_hz[idx] = stddev * d_rng6[idx*6+2+twiddle*3]; } else { d_hx[idx] = 0; d_hy[idx] = 0; d_hz[idx] = 0; } } void cuda_thermal32(const float* d_rng6, const int twiddle, float alpha, float gamma, float dt, float temperature, float* d_hx, float* d_hy, float* d_hz, float* d_ms, float* d_scale, const int nx, const int ny, const int nz) { const float FOOBAR = (2.0 * alpha * temperature) / (dt * gamma); const int _blocksx = nx / 32 + 1; const int _blocksy = ny / 32 + 1; dim3 blocks(_blocksx, _blocksy); dim3 threads(32,32); if(twiddle == 0) { for(int i=0; i<nz; i++) { do_thermal32<0><<<blocks, threads>>>(d_rng6, FOOBAR, d_scale, d_hx, d_hy, d_hz, d_ms, nx, ny, nx*ny*i); CHECK } } else { for(int i=0; i<nz; i++) { do_thermal32<1><<<blocks, threads>>>(d_rng6, FOOBAR, d_scale, d_hx, d_hy, d_hz, d_ms, nx, ny, nx*ny*i); CHECK } } }
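do_thermal32 scales three Gaussian deviates per cell by a standard deviation of sqrt(FOOBAR * scale / ms), where FOOBAR = (2 * alpha * temperature) / (dt * gamma) is computed once on the host in cuda_thermal32. A small, hypothetical host mirror of that per-cell value (the function name is illustrative) can be used to spot-check individual cells:

#include <math.h>

// Hypothetical host mirror of the per-cell standard deviation in do_thermal32.
// Returns 0 for empty cells (ms == 0), matching the kernel's else branch.
float thermal_stddev(float alpha, float gamma, float dt, float temperature,
                     float scale, float ms) {
    const float prefactor = (2.0f * alpha * temperature) / (dt * gamma);
    return (ms != 0.0f) ? sqrtf((prefactor * scale) / ms) : 0.0f;
}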
363d538ea76ca194e0ef716cf83fd8b2e45bbad5.hip
// !!! This is a file automatically generated by hipify!!! // nvcc -x cu -arch=sm_60 -std=c++11 ISMLMC-lookup.cu -o Lookup.o -ccbin /usr/bin/g++-4.8 #include "cmdlineparse.h" #include "CUDAISMLMC.h" #include <iostream> #include <thrust/tuple.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <random> #include <iostream> #include <stdio.h> #include <thread> #include <fstream> #include <future> #include <string> #include <algorithm> #include <sstream> #include <vector> using namespace std; // compute the elementwise average of a bunch of 2d arrays float** array_avg(float*** meta_array, int n_arrays, int rows, int cols) { float** avg_array = new float*[rows]; for (int i = 0; i < rows; i++) { avg_array[i] = new float[cols](); } for (int a = 0; a < n_arrays; a++) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { avg_array[i][j] += meta_array[a][i][j]/n_arrays; } } } return avg_array; } // compute the elementwise standard error of a bunch of 2d arrays float** array_stderr(float*** meta_array, int n_arrays, int rows, int cols) { float** avg_array = new float*[rows]; float** stderr_array = new float*[rows]; for (int i = 0; i < rows; i++) { avg_array[i] = new float[cols](); stderr_array[i] = new float[cols](); } avg_array = array_avg(meta_array, n_arrays, rows, cols); for (int a = 0; a < n_arrays; a++) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { stderr_array[i][j] += pow(meta_array[a][i][j]-avg_array[i][j],2); } } } for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { // standard error is standard deviation divided by number of data points stderr_array[i][j] = sqrt(stderr_array[i][j]/n_arrays)/sqrt(n_arrays); } } return stderr_array; } // compute the uncertainty of a run based on the poisson counting distribution float** array_poisson(float*** meta_array, int n_arrays, int rows, int cols, int N) { // in Poisson counting distribution, R_unc = sqrt(R)/N where R is the number of counts // and N is the number of trials // In our case, R = r*N where r is the measured value and N is the number of photons // so r_unc = sqrt(r/N) float** unc_array = new float*[rows]; for (int i = 0; i < rows; i++) { unc_array[i] = new float[cols]; } for (int a = 0; a < n_arrays; a++) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { unc_array[i][j] = sqrt(meta_array[a][i][j]/N); } } } return unc_array; } // write the data in a neat way so Python (and humans) can easily read it void write_data(string filename, string header, int N_mu_a, int N_mu_s, float n, float g, float t, float* mu_a, float* mu_s_, float** data, bool first) { ofstream file; if (first) { file.open(filename); } else { file.open(filename, ios_base::app); } file << "\"" << header << "\",\"mua\""; for (int i = 0; i < N_mu_s; i++) { file << ",\"\""; } file << "\n"; file << "\"mus'\",\"\""; for (int i = 0; i < N_mu_s; i++) { file << "," << mu_s_[i]; } file << "\n"; for (int i = 0; i < N_mu_a; i++) { file << "\"\"," << mu_a[i]; for (int j = 0; j < N_mu_s; j++) { file << "," << data[i][j]; } file << "\n"; } file.close(); } // sample the whole space of optical coefficients provided in order to generate data for a lookup table void singleLayerSampleSpace(int* GPUs, int nGPU, int N, int runs, int N_mu_a, int N_mu_s, float* n, float* g, float* t, float* bounds, float* mu_a, float* mu_s_, bool R, float R_pw, float R_fs, float R_fp, float R_fd, float R_f, float R_angle, bool specular_included, bool T, float T_pw, float T_fs, float T_fp, float T_fd, float T_f, float 
T_angle, bool separate) { // future types for values that will be filled by threads future<thrust::tuple<float,float,float,float,float>>* thread_vals = new future<thrust::tuple<float,float,float,float,float>>[nGPU]; // determine if the poisson distribution should be used to find the error with just one run bool poisson = false; if (runs == 0) { runs = 1; poisson = true; } // initialization of 2D arrays that will hold measurement values for the whole sample space // we only care about R_d and T_d, but I have left the necessary code in to deal with the other values // T_u would need to be simulated separately if we wanted to find all the values with this code // just add thread3_vals just like the other two and set it up with R=false and T=false // then rip T_u from thread3_vals and Bob's your uncle. //float*** meta_A = new float**[runs]; float*** meta_R_d = new float**[runs]; //float*** meta_R_s = new float**[runs]; float*** meta_T_d = new float**[runs]; //float*** meta_T_u = new float**[runs]; for (int i = 0; i < runs; i++) { //meta_A[i] = new float*[N_mu_a]; meta_R_d[i] = new float*[N_mu_a]; //meta_R_s[i] = new float*[N_mu_a]; meta_T_d[i] = new float*[N_mu_a]; //meta_T_u[i] = new float*[N_mu_a]; } for (int i = 0; i < runs; i++) { for (int j = 0; j < N_mu_a; j++) { //meta_A[i][j] = new float[N_mu_s]; meta_R_d[i][j] = new float[N_mu_s]; //meta_R_s[i][j] = new float[N_mu_s]; meta_T_d[i][j] = new float[N_mu_s]; //meta_T_u[i][j] = new float[N_mu_s]; } } //float** A = new float*[N_mu_a]; float** R_d = new float*[N_mu_a]; //float** R_s = new float*[N_mu_a]; float** T_d = new float*[N_mu_a]; //float** T_u = new float*[N_mu_a]; //float** A_unc = new float*[N_mu_a]; float** R_d_unc = new float*[N_mu_a]; //float** R_s_unc = new float*[N_mu_a]; float** T_d_unc = new float*[N_mu_a]; //float** T_u_unc = new float*[N_mu_a]; for (int i = 0; i < N_mu_a; i++) { //A[i] = new float[N_mu_s]; R_d[i] = new float[N_mu_s]; //R_s[i] = new float[N_mu_s]; T_d[i] = new float[N_mu_s]; //T_u[i] = new float[N_mu_s]; //A_unc[i] = new float[N_mu_s]; R_d_unc[i] = new float[N_mu_s]; //R_s_unc[i] = new float[N_mu_s]; T_d_unc[i] = new float[N_mu_s]; //T_u_unc[i] = new float[N_mu_s]; } // temp variables to hold each measurement value from each GPU float* A_ = new float[nGPU]; float* R_d_ = new float[nGPU]; float* R_s_ = new float[nGPU]; float* T_d_ = new float[nGPU]; float* T_u_ = new float[nGPU]; // temp variables to store the currect optical properties float** _mu_a = new float*[nGPU]; float** _mu_s = new float*[nGPU]; float** _mu_s_ = new float*[nGPU]; float** _mu_t = new float*[nGPU]; for (int i = 0; i < nGPU; i++) { _mu_a[i] = new float[1]; _mu_s[i] = new float[1]; _mu_s_[i] = new float[1]; _mu_t[i] = new float[1]; } // calculating mu_s from mu_s' float* mu_s = new float[N_mu_s]; for (int i = 0; i < N_mu_s; i++) { if (g[0] == 0) { mu_s[i] = mu_s_[i]; } else { mu_s[i] = mu_s_[i]/(1-g[0]); } } // current device id int dev; // run as many times as requested for (int r = 0; r < runs; r++) { // sample all mu_s for (int j = 0; j < N_mu_s; j++) { for (int d = 0; d < nGPU; d++) { _mu_s[d][0] = mu_s[j]; _mu_s_[d][0] = mu_s_[j]; } // sample all mu_a for (int i = 0; i < N_mu_a; i+=nGPU) { // if T_d and R_d were measured separately, do both configurations at once, if not, then two two samples at once if (!separate) { for (int d = 0; d < nGPU && i+d < N_mu_a; d++) { dev = GPUs[d]; _mu_a[d][0] = mu_a[i+d]; _mu_t[d][0] = _mu_s[d][0] + _mu_a[d][0]; thread_vals[d] = async(launch::async, &ISMLMC, dev, N, 1, n, g, t, bounds, _mu_a[d], 
_mu_s[d], _mu_t[d], R, R_pw, R_fs, R_fp, R_fd, R_f, R_angle, specular_included, T, T_pw, T_fs, T_fp, T_fd, T_f, T_angle); } for (int d = 0; d < nGPU && i+d < N_mu_a; d++) { thrust::tie(A_[d],R_d_[d],R_s_[d],T_d_[d],T_u_[d]) = thread_vals[d].get(); } for (int d = 0; d < nGPU && i+d < N_mu_a; d++) { dev = GPUs[d]; printf("RUN: %2d of %2d | GPU %2d | mua: %8.4f mus: %8.4f | A: %1.6f R_d %1.6f T_d %1.6f\n", r+1, runs, dev, mu_a[i+d], mu_s_[j], A_[d], R_d_[d], T_d_[d]); // save values to arrays if (R || T) { //A[i+d][j] = A_[d]; R_d[i+d][j] = R_d_[d]; //R_s[i+d][j] = R_s_[d]; T_d[i+d][j] = T_d_[d]; //T_u[i+d][j] = T_u_[d]; } else { //A[i+d][j] = A_[d]; R_d[i+d][j] = R_d_[d] + R_s_[d]; //R_s[i+d][j] = R_s_[d]; T_d[i+d][j] = T_d_[d] + T_u_[d]; //T_u[i+d][j] = T_u_[d]; } } } else if (separate) { for (int d = 0; d < nGPU && i+d < N_mu_a; d++) { dev = GPUs[d]; _mu_a[d][0] = mu_a[i+d]; _mu_t[d][0] = _mu_s[d][0] + _mu_a[d][0]; thread_vals[d] = async(launch::async, &ISMLMC, dev, N, 1, n, g, t, bounds, _mu_a[d], _mu_s[d], _mu_t[d], true, R_pw, R_fs, R_fp, R_fd, R_f, R_angle, specular_included, false, T_pw, T_fs, T_fp, T_fd, T_f, T_angle); } for (int d = 0; d < nGPU && i+d < N_mu_a; d++) { thrust::tie(A_[d],R_d_[d],R_s_[d],T_d_[d],T_u_[d]) = thread_vals[d].get(); } for (int d = 0; d < nGPU && i+d < N_mu_a; d++) { dev = GPUs[d]; printf("RUN: %2d of %2d | GPU %2d | R Sphere | mua: %8.4f mus: %8.4f | A: %1.6f R_d %1.6f T_d %1.6f\n", r+1, runs, dev, mu_a[i+d], mu_s_[j], A_[d], R_d_[d], T_d_[d]); // save R_d to array R_d[i+d][j] = R_d_[d]; } for (int d = 0; d < nGPU && i+d < N_mu_a; d++) { dev = GPUs[d]; _mu_a[d][0] = mu_a[i+d]; _mu_t[d][0] = _mu_s[d][0] + _mu_a[d][0]; thread_vals[d] = async(launch::async, &ISMLMC, dev, N, 1, n, g, t, bounds, _mu_a[d], _mu_s[d], _mu_t[d], false, R_pw, R_fs, R_fp, R_fd, R_f, R_angle, specular_included, true, T_pw, T_fs, T_fp, T_fd, T_f, T_angle); } for (int d = 0; d < nGPU&& i+d < N_mu_a; d++) { thrust::tie(A_[d],R_d_[d],R_s_[d],T_d_[d],T_u_[d]) = thread_vals[d].get(); } for (int d = 0; d < nGPU && i+d < N_mu_a; d++) { dev = GPUs[d]; printf("RUN: %2d of %2d | GPU %2d | T Sphere | mua: %8.4f mus: %8.4f | A: %1.6f R_d %1.6f T_d %1.6f\n", r+1, runs, dev, mu_a[i+d], mu_s_[j], A_[d], R_d_[d], T_d_[d]); // save T_d to array T_d[i+d][j] = T_d_[d]; } // save values to arrays //A[i+d][j] = something //R_s[i+d][j] = something //T_u[i+d][j] = something } } // save this run's data for (int i = 0; i < N_mu_a; i++) { for (int j = 0; j < N_mu_s; j++) { //meta_A[r][i][j] = A[i][j]; meta_R_d[r][i][j] = R_d[i][j]; //meta_R_s[r][i][j] = R_s[i][j]; meta_T_d[r][i][j] = T_d[i][j]; //meta_T_u[r][i][j] = T_u[i][j]; } } } } // find averages and standard deviations //A = array_avg(meta_A, runs, N_mu_a, N_mu_s); R_d = array_avg(meta_R_d, runs, N_mu_a, N_mu_s); //R_s = array_avg(meta_R_s, runs, N_mu_a, N_mu_s); T_d = array_avg(meta_T_d, runs, N_mu_a, N_mu_s); //T_u = array_avg(meta_T_u, runs, N_mu_a, N_mu_s); if (poisson) { //A_unc = array_poisson(meta_A, runs, N_mu_a, N_mu_s, N); R_d_unc = array_poisson(meta_R_d, runs, N_mu_a, N_mu_s, N); //R_s_unc = array_poisson(meta_R_s, runs, N_mu_a, N_mu_s, N); T_d_unc = array_poisson(meta_T_d, runs, N_mu_a, N_mu_s, N); //T_u_unc = array_poisson(meta_T_u, runs, N_mu_a, N_mu_s, N); } else { //A_unc = array_stderr(meta_A, runs, N_mu_a, N_mu_s); R_d_unc = array_stderr(meta_R_d, runs, N_mu_a, N_mu_s); //R_s_unc = array_stderr(meta_R_s, runs, N_mu_a, N_mu_s); T_d_unc = array_stderr(meta_T_d, runs, N_mu_a, N_mu_s); //T_u_unc = array_stderr(meta_T_u, runs, N_mu_a, 
N_mu_s); } // write data in such a way that python and numpy can easily read it and generate a lookup table //write_data("Absorptance.csv", "Average Absorptance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, A, true); write_data("Diffuse_Reflectance.csv", "Average Diffuse Reflectance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, R_d, true); //write_data("Specular_Reflectance.csv", "Average Specular Reflectance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, R_s, true); write_data("Diffuse_Transmittance.csv", "Average Diffuse Transmittance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, T_d, true); //write_data("Unscattered_Transmittance.csv", "Average Unscattered Transmittance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, T_u, true); //write_data("Absorptance.csv", "Standard Error in Absorptance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, A_unc, false); write_data("Diffuse_Reflectance.csv", "Standard Error in Diffuse Reflectance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, R_d_unc, false); //write_data("Specular_Reflectance.csv", "Standard Error in Specular Reflectance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, R_s_unc, false); write_data("Diffuse_Transmittance.csv", "Standard Error in Diffuse Transmittance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, T_d_unc, false); //write_data("Unscattered_Transmittance.csv", "Standard Error in Unscattered Transmittance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, T_u_unc, false); } int main(int argc, char* argv[]) { if (cmdOptionExists(argv, argv+argc, "--help") || cmdOptionExists(argv, argv+argc, "-h")) { cout << "\n---------------------------------------------------------------------------------------\n"; cout << "\nIntegrating Sphere Monte Carlo Lookup Table Data Generator\n"; cout << "\nWritten by Patrick Cook | Fort Hays State University | 4 May 2019\n"; cout << "[email protected] or [email protected]\n"; cout << "\nTo be used in conjuction with the provided python script to create lookup tables.\nThis code currently only produces data for diffuse reflectance and diffuse transmittance.\n"; cout << "\n---------------------------------------------------------------------------------------\n"; cout << "\nUseful Flags:\n"; cout << "\n--help - Shows this help text.\n"; cout << "\n-h - Same as --help.\n"; cout << "\n--specularIncluded\n - Changes the incident beam to 8deg from the normal to include\n specular reflection in any reflection sphere that may be present.\n"; cout << "\n--separate\n - Run simulations separately to measure diffuse reflectance and diffuse transmittance.\n Usually enabled when using a single sphere to measure R_d and T_d.\n Disable for dual-sphere experiments.\n >>> If separate is enabled then ALL of --Rsphere\n --Rangle --Tsphere and --Tangle must be specified for accurate results. <<<"; cout << "\n--example\n - Show an example configuration and run it.\n Makes all required parameters except --GPU optional.\n Useful for ensuring your installation works.\n"; cout << "\n--go - Just run the simulation. Don't ask to confirm.\n"; cout << "\n---------------------------------------------------------------------------------------\n"; cout << "\nRequired Parameters:\n"; cout << "\n--GPU - Device IDs for GPUs to run on.\n Use the 'nvidia-smi' command to list available GPUs.\n Mutliple GPU IDs must be in quotes.\n Example: --GPU \"0 2\"\n"; cout << "\n--unc - Uncertainty threshold for all measured parameters. 
The number of photons will be\n calculated such that this uncertainty should be reached.\n"; cout << "\n-n - Relative refractive index (relative to the surrounding medium) to use for all samples.\n"; cout << "\n-g - Anisotropy of all samples.\n"; cout << "\n-t - Thickness of all samples in centimeters.\n"; cout << "\n--Nmua - Total number of absorption coefficients.\n"; cout << "\n--muaS - Starting value for the absorption coefficients in 1/cm.\n"; cout << "\n--muaB - Base of the geometric range for the absorption coefficients.\n"; cout << " Absorption coefficients will be generated with mua[i] = muaS*muaB^i for 0 <= i < Nmua.\n"; cout << "\n--Nmus - Total number of REDUCED scattering coefficients.\n"; cout << "\n--musS - Starting value for the REDUCED scattering coefficients in 1/cm.\n"; cout << "\n--musB - Base of the geometric range for the REDUCED scattering coefficients.\n"; cout << " REDUCED scattering coeffs will be generated with mus[i] = musS*musB^i for 0 <= i < Nmus.\n"; cout << " To convert from the reduced scattering coefficient, mus', to the scattering coefficient:\n"; cout << " mus = mus'/(1-g) for g!=1 and mus = mus' if g = 1.\n"; cout << "\n\nOptional Parameters:\n"; cout << "\n--est - Estimate of largest value to be measured. Will reduce number of photons\n necessary to reach certain error.\n Example: --est 0.75\n"; cout << "\n--Rsphere\n - Parameters of the sphere measuring reflectance. Must be in quotes and\n in the following order: pw fs fp fd\n - pw is the reflectance of the inner wall\n - fs is the sample port fractional area\n - fp is the source port fractional area.\n - fd is the detector fractional area\n Example: --Rsphere \"0.99 0.1 0.1 0.2\"\n >>> If --Rsphere is not specified then Rangle MUST be. <<<\n"; cout << "\n--Rangle\n - Angle threshold in degrees for what counts as specular\n reflectance when there is no reflection sphere present.\n"; cout << "\n--Tsphere\n - Parameters of the sphere measuring transmittance. Must be in quotes and\n in the following order: pw fs fp fd\n - pw is the reflectance of the inner wall\n - fs is the sample/source port fractional area\n - fp is the optional port fractional area.\n - fd is the detector fractional area\n Example: --Tsphere \"0.99 0.1 0.1 0.2\"\n >>> If --Tsphere is not specified then Tangle MUST be. <<<\n"; cout << "\n--Tangle\n - Angle threshold in degrees for what counts as unscattered\n transmittance when there is no transmission sphere present.\n"; cout << "\n-N - Can be used to override --unc. Must be used in conjuction with --runs.\n Number of photons to use per simulation.\n"; cout << "\n--runs - Can be used to override --unc. 
Must be used in conjuction with -N.\n Number of times to run each simulation.\n Results will report the average and standard error of these runs.\n"; cout << "\n---------------------------------------------------------------------------------------\n"; cout << "\nNotes:\n"; cout << "If you are getting 'out of memory' errors, reduce N or change the RNGS variable in the source\ncode to something smaller.\n\n"; return 1; } if (!cmdOptionExists(argv, argv+argc, "--GPU")) { cout << "Must specify device IDs with the --GPU flag.\nUse --help to see available options.\n"; return 2; } srand(time(NULL)); //// This block finds the number of GPUs and declares variables for CURAND for each of them //// int nGPU = 0; char* GPU = getCmdOption(argv, argv+argc, "--GPU"); string GPUstr(GPU); stringstream GPUss1(GPUstr); int temp1; while (GPUss1 >> temp1) { nGPU++; } stringstream GPUss2(GPUstr); int temp2; int* GPUs = new int[nGPU]; int fGPU = 0; for (int i = 0; GPUss2 >> temp2; i++) { GPUs[i] = temp2; if (GPUs[i] > fGPU) { fGPU = GPUs[i]; } cout << "\n"; CUDABasicProperties(GPUs[i]); } cout << "\n"; fGPU++; rand_set = new bool[fGPU](); globalDeviceStates = new hiprandState_t*[fGPU]; //// //// //// //// //// //// //// //// //// //// bool go = cmdOptionExists(argv, argv+argc, "--go"); // placeholder parameters that will persist if the user doesn't set them //====Sphere Parameters====// bool R = false; // boolean to determine if the reflection sphere is present or not float R_pw = nanf("0"); // reflectance of reflection sphere float R_fs = nanf("0"); // sample port fraction of reflection sphere float R_fp = nanf("0"); // source port fraction of reflection sphere float R_fd = nanf("0"); // detector fraction of reflection sphere float R_f = nanf("0"); // total port fraction of reflection sphere float R_angle = nanf("0"); // angle threshold for specular reflection if no Rsphere is present bool specular_included = false; // boolean to determine if specular reflection is included or not bool T = false; // boolean to determine if the transmission sphere is present or not float T_pw = nanf("0"); // reflectance of transmission sphere float T_fs = nanf("0"); // sample port fraction of transmission sphere float T_fp = nanf("0"); // optional port fraction of transmission sphere float T_fd = nanf("0"); // detector fraction of transmission sphere float T_f = nanf("0"); // total port fraction of transmission sphere float T_angle = nanf("0"); // angle threshold for direct transmission if no Tsphere is present //========// int N = -1; int runs = -1; float uncertainty = 0; float estimate = 1; bool separate = false; // measure R_d and T_d separately if true float n[1] = {nanf("0")}; float g[1] = {nanf("0")}; float t[1] = {nanf("0")}; int N_mu_a = -1; float mu_a_start = nanf("0"); float mu_a_base = nanf("0"); int N_mu_s = -1; float mu_s__start = nanf("0"); float mu_s__base = nanf("0"); // example parameters if (cmdOptionExists(argv, argv+argc, "--example")) { //====Sphere Parameters====// R = true; // boolean to determine if the reflection sphere is present or not R_pw = 0.99; // reflectance of reflection sphere R_fs = 0.023510; // sample port fraction of reflection sphere R_fp = 0.054638; // source port fraction of reflection sphere R_fd = 0.023510; // detector fraction of reflection sphere R_f = R_fs + R_fp + R_fd; // total port fraction of reflection sphere R_angle = 0; // angle threshold for specular reflection if no Rsphere is present specular_included = false; // boolean to determine if specular reflection is included or 
not T = true; // boolean to determine if the transmission sphere is present or not T_pw = 0.99; // reflectance of transmission sphere T_fs = 0.054638; // sample port fraction of transmission sphere T_fp = 0.023510; // optional port fraction of transmission sphere T_fd = 0.023510; // detector fraction of transmission sphere T_f = T_fs + T_fp + T_fd; // total port fraction of transmission sphere T_angle = 0.22*M_PI/180; // angle threshold for direct transmission if no Tsphere is present //========// N = 100000; runs = 10; separate = true; n[0] = 1.4; g[0] = 0.5; t[0] = 0.135; N_mu_a = 13; mu_a_start = 0.5; mu_a_base = 1.2; N_mu_s = 11; mu_s__start = 10; mu_s__base = 1.2; cout << "\n---------------------------------------------------------------------------------------\n"; cout << "\nEXAMPLE LOOKUP TABLE SIMULATION\n"; printf("\nParameters (as found in Cook et al 2019):\n\n-N %d\n--runs %d\n-n %f\n-g %f\n-t %f\n--Nmua %d\n--muaS %f\n--muaB %f\n--Nmus %d\n--musS %f\n--musB %f\n--Rsphere \"%f %f %f %f\"\n--Rangle %f\n--Tsphere \"%f %f %f %f\"\n--Tangle %f\n--separate\n", N, runs, n[0], g[0], t[0], N_mu_a, mu_a_start, mu_a_base, N_mu_s, mu_s__start, mu_s__base, R_pw, R_fs, R_fp, R_fd, R_angle*180/M_PI, T_pw, T_fs, T_fp, T_fd, T_angle*180/M_PI); if (!go) { cout <<"\nPress [enter] to start or Ctrl+C to cancel."; getchar(); } } // user specified parameters parsed from command line else { if ((cmdOptionExists(argv, argv+argc, "-N") != cmdOptionExists(argv, argv+argc, "--runs")) && !(cmdOptionExists(argv, argv+argc, "--unc"))) { cout << "-N and --runs must be used in conjuction, not by themselves.\n"; return 2; } if ((cmdOptionExists(argv, argv+argc, "-N") || cmdOptionExists(argv, argv+argc, "--runs")) && (cmdOptionExists(argv, argv+argc, "--unc") || cmdOptionExists(argv, argv+argc, "--est"))) { cout << "-N and --runs cannot be used with --unc or --est\n"; return 2; } //====Sphere Parameters====// R = cmdOptionExists(argv, argv+argc, "--Rsphere"); // boolean to determine if the reflection sphere is present or not float* R_params = new float[4]; for (int i = 0; i < 4; i++) { R_params[i] = nanf("1"); } if (R) { R_params = readArrOption(argv, argv+argc, "--Rsphere", 4); } R_pw = R_params[0]; // reflectance of reflection sphere R_fs = R_params[1]; // sample port fraction of reflection sphere R_fp = R_params[2]; // source port fraction of reflection sphere R_fd = R_params[3]; // detector fraction of reflection sphere R_f = R_fs + R_fp + R_fd; // total port fraction of reflection sphere if (cmdOptionExists(argv, argv+argc, "--Rangle")){ R_angle = readFloatOption(argv, argv+argc, "--Rangle")*M_PI/180; } // angle threshold for specular reflection if no Rsphere is present else { R_angle = 0; } specular_included = cmdOptionExists(argv, argv+argc, "--specularIncluded"); // boolean to determine if specular reflection is included or not T = cmdOptionExists(argv, argv+argc, "--Tsphere"); // boolean to determine if the transmission sphere is present or not float* T_params = new float[4]; for (int i = 0; i < 4; i++) { T_params[i] = nanf("1"); } if (T) { T_params = readArrOption(argv, argv+argc, "--Tsphere", 4); } T_pw = T_params[0]; // reflectance of transmission sphere T_fs = T_params[1]; // sample port fraction of transmission sphere T_fp = T_params[2]; // optional port fraction of transmission sphere T_fd = T_params[3]; // detector fraction of transmission sphere T_f = T_fs + T_fp + T_fd; // total port fraction of transmission sphere if (cmdOptionExists(argv, argv+argc, "--Tangle")){ T_angle = 
readFloatOption(argv, argv+argc, "--Tangle")*M_PI/180; } // angle threshold for direct transmission if no Tsphere is present else { T_angle = 0; } //========// if (cmdOptionExists(argv, argv+argc, "-N") && cmdOptionExists(argv, argv+argc, "--runs")) { N = readIntOption(argv, argv+argc, "-N"); runs = readIntOption(argv, argv+argc, "--runs"); } else if (cmdOptionExists(argv, argv+argc, "--unc")) { runs = 0; if (cmdOptionExists(argv, argv+argc, "--est")) { estimate = readFloatOption(argv, argv+argc, "--est"); } uncertainty = readFloatOption(argv, argv+argc, "--unc"); N = (int)ceil(estimate/(uncertainty*uncertainty)); } separate = cmdOptionExists(argv, argv+argc, "--separate"); n[0] = readFloatOption(argv, argv+argc, "-n"); g[0] = readFloatOption(argv, argv+argc, "-g"); t[0] = readFloatOption(argv, argv+argc, "-t"); N_mu_a = readIntOption(argv, argv+argc, "--Nmua"); mu_a_start = readFloatOption(argv, argv+argc, "--muaS"); mu_a_base = readFloatOption(argv, argv+argc, "--muaB"); N_mu_s = readIntOption(argv, argv+argc, "--Nmus"); mu_s__start = readFloatOption(argv, argv+argc, "--musS"); mu_s__base = readFloatOption(argv, argv+argc, "--musB"); cout << "\n---------------------------------------------------------------------------------------\n"; cout << "\nParameters:\n"; if ( cmdOptionExists(argv, argc+argv, "--unc") ) { printf("\n--unc %f", uncertainty); } if ( cmdOptionExists(argv, argc+argv, "--est") ) { printf("\n--est %f", estimate); } if ( cmdOptionExists(argv, argc+argv, "-N") ) { printf("\n-N %d\n--runs %d", N, runs); } else { printf("\n-N %d (calculated)", N); } printf("\n-n %f\n-g %f\n-t %f\n--Nmua %d\n--muaS %f\n--muaB %f\n--Nmus %d\n--musS %f\n--musB %f\n--Rsphere \"%f %f %f %f\"\n--Rangle %f\n--Tsphere \"%f %f %f %f\"\n--Tangle %f\n", n[0], g[0], t[0], N_mu_a, mu_a_start, mu_a_base, N_mu_s, mu_s__start, mu_s__base, R_pw, R_fs, R_fp, R_fd, R_angle*180/M_PI, T_pw, T_fs, T_fp, T_fd, T_angle*180/M_PI); if (!go) { cout <<"\nPress [enter] to start or Ctrl+C to cancel.\n"; getchar(); } } // calculate the arrays that store the entire space of mua and mus' float* mu_a = new float[N_mu_a]; float* mu_s_= new float[N_mu_s]; for (int i = 0; i < N_mu_a; i++) { mu_a[i] = mu_a_start*pow(mu_a_base,i); } for (int i = 0; i < N_mu_s; i++) { mu_s_[i] = mu_s__start*pow(mu_s__base,i); } float* mu_s = new float[N_mu_a]; for (int i = 0; i < N_mu_s; i++) { if (g[0] == 1) { mu_s[i] = mu_s_[i]; } else { mu_s[i] = mu_s_[i]/(1-g[0]); } } int layers = 1; // define layer boundaries float* bounds = new float[layers+1]; bounds[0] = 0; for( int i = 1; i < layers+1; i++ ) { bounds[i] = bounds[i-1] + t[i-1]; } // generate the data for the lookup table singleLayerSampleSpace(GPUs, nGPU, N, runs, N_mu_a, N_mu_s, n, g, t, bounds, mu_a, mu_s_, R, R_pw, R_fs, R_fp, R_fd, R_f, R_angle, specular_included, T, T_pw, T_fs, T_fp, T_fd, T_f, T_angle, separate); return 0; }
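When --unc is supplied, main inverts the Poisson relation used in array_poisson, r_unc = sqrt(r/N), to pick the photon count: N = ceil(estimate / uncertainty^2). As a worked check with illustrative values, --est 0.75 and --unc 0.005 give N = ceil(0.75 / 0.000025) = 30000 photons, and a measured value of r = 0.75 then carries an uncertainty of sqrt(0.75 / 30000) = 0.005, as intended.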
363d538ea76ca194e0ef716cf83fd8b2e45bbad5.cu
// nvcc -x cu -arch=sm_60 -std=c++11 ISMLMC-lookup.cu -o Lookup.o -ccbin /usr/bin/g++-4.8 #include "cmdlineparse.h" #include "CUDAISMLMC.h" #include <iostream> #include <thrust/tuple.h> #include <curand.h> #include <curand_kernel.h> #include <random> #include <iostream> #include <stdio.h> #include <thread> #include <fstream> #include <future> #include <string> #include <algorithm> #include <sstream> #include <vector> using namespace std; // compute the elementwise average of a bunch of 2d arrays float** array_avg(float*** meta_array, int n_arrays, int rows, int cols) { float** avg_array = new float*[rows]; for (int i = 0; i < rows; i++) { avg_array[i] = new float[cols](); } for (int a = 0; a < n_arrays; a++) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { avg_array[i][j] += meta_array[a][i][j]/n_arrays; } } } return avg_array; } // compute the elementwise standard error of a bunch of 2d arrays float** array_stderr(float*** meta_array, int n_arrays, int rows, int cols) { float** avg_array = new float*[rows]; float** stderr_array = new float*[rows]; for (int i = 0; i < rows; i++) { avg_array[i] = new float[cols](); stderr_array[i] = new float[cols](); } avg_array = array_avg(meta_array, n_arrays, rows, cols); for (int a = 0; a < n_arrays; a++) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { stderr_array[i][j] += pow(meta_array[a][i][j]-avg_array[i][j],2); } } } for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { // standard error is standard deviation divided by number of data points stderr_array[i][j] = sqrt(stderr_array[i][j]/n_arrays)/sqrt(n_arrays); } } return stderr_array; } // compute the uncertainty of a run based on the poisson counting distribution float** array_poisson(float*** meta_array, int n_arrays, int rows, int cols, int N) { // in Poisson counting distribution, R_unc = sqrt(R)/N where R is the number of counts // and N is the number of trials // In our case, R = r*N where r is the measured value and N is the number of photons // so r_unc = sqrt(r/N) float** unc_array = new float*[rows]; for (int i = 0; i < rows; i++) { unc_array[i] = new float[cols]; } for (int a = 0; a < n_arrays; a++) { for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { unc_array[i][j] = sqrt(meta_array[a][i][j]/N); } } } return unc_array; } // write the data in a neat way so Python (and humans) can easily read it void write_data(string filename, string header, int N_mu_a, int N_mu_s, float n, float g, float t, float* mu_a, float* mu_s_, float** data, bool first) { ofstream file; if (first) { file.open(filename); } else { file.open(filename, ios_base::app); } file << "\"" << header << "\",\"mua\""; for (int i = 0; i < N_mu_s; i++) { file << ",\"\""; } file << "\n"; file << "\"mus'\",\"\""; for (int i = 0; i < N_mu_s; i++) { file << "," << mu_s_[i]; } file << "\n"; for (int i = 0; i < N_mu_a; i++) { file << "\"\"," << mu_a[i]; for (int j = 0; j < N_mu_s; j++) { file << "," << data[i][j]; } file << "\n"; } file.close(); } // sample the whole space of optical coefficients provided in order to generate data for a lookup table void singleLayerSampleSpace(int* GPUs, int nGPU, int N, int runs, int N_mu_a, int N_mu_s, float* n, float* g, float* t, float* bounds, float* mu_a, float* mu_s_, bool R, float R_pw, float R_fs, float R_fp, float R_fd, float R_f, float R_angle, bool specular_included, bool T, float T_pw, float T_fs, float T_fp, float T_fd, float T_f, float T_angle, bool separate) { // future types for values that will be filled by threads 
future<thrust::tuple<float,float,float,float,float>>* thread_vals = new future<thrust::tuple<float,float,float,float,float>>[nGPU]; // determine if the poisson distribution should be used to find the error with just one run bool poisson = false; if (runs == 0) { runs = 1; poisson = true; } // initialization of 2D arrays that will hold measurement values for the whole sample space // we only care about R_d and T_d, but I have left the necessary code in to deal with the other values // T_u would need to be simulated separately if we wanted to find all the values with this code // just add thread3_vals just like the other two and set it up with R=false and T=false // then rip T_u from thread3_vals and Bob's your uncle. //float*** meta_A = new float**[runs]; float*** meta_R_d = new float**[runs]; //float*** meta_R_s = new float**[runs]; float*** meta_T_d = new float**[runs]; //float*** meta_T_u = new float**[runs]; for (int i = 0; i < runs; i++) { //meta_A[i] = new float*[N_mu_a]; meta_R_d[i] = new float*[N_mu_a]; //meta_R_s[i] = new float*[N_mu_a]; meta_T_d[i] = new float*[N_mu_a]; //meta_T_u[i] = new float*[N_mu_a]; } for (int i = 0; i < runs; i++) { for (int j = 0; j < N_mu_a; j++) { //meta_A[i][j] = new float[N_mu_s]; meta_R_d[i][j] = new float[N_mu_s]; //meta_R_s[i][j] = new float[N_mu_s]; meta_T_d[i][j] = new float[N_mu_s]; //meta_T_u[i][j] = new float[N_mu_s]; } } //float** A = new float*[N_mu_a]; float** R_d = new float*[N_mu_a]; //float** R_s = new float*[N_mu_a]; float** T_d = new float*[N_mu_a]; //float** T_u = new float*[N_mu_a]; //float** A_unc = new float*[N_mu_a]; float** R_d_unc = new float*[N_mu_a]; //float** R_s_unc = new float*[N_mu_a]; float** T_d_unc = new float*[N_mu_a]; //float** T_u_unc = new float*[N_mu_a]; for (int i = 0; i < N_mu_a; i++) { //A[i] = new float[N_mu_s]; R_d[i] = new float[N_mu_s]; //R_s[i] = new float[N_mu_s]; T_d[i] = new float[N_mu_s]; //T_u[i] = new float[N_mu_s]; //A_unc[i] = new float[N_mu_s]; R_d_unc[i] = new float[N_mu_s]; //R_s_unc[i] = new float[N_mu_s]; T_d_unc[i] = new float[N_mu_s]; //T_u_unc[i] = new float[N_mu_s]; } // temp variables to hold each measurement value from each GPU float* A_ = new float[nGPU]; float* R_d_ = new float[nGPU]; float* R_s_ = new float[nGPU]; float* T_d_ = new float[nGPU]; float* T_u_ = new float[nGPU]; // temp variables to store the currect optical properties float** _mu_a = new float*[nGPU]; float** _mu_s = new float*[nGPU]; float** _mu_s_ = new float*[nGPU]; float** _mu_t = new float*[nGPU]; for (int i = 0; i < nGPU; i++) { _mu_a[i] = new float[1]; _mu_s[i] = new float[1]; _mu_s_[i] = new float[1]; _mu_t[i] = new float[1]; } // calculating mu_s from mu_s' float* mu_s = new float[N_mu_s]; for (int i = 0; i < N_mu_s; i++) { if (g[0] == 0) { mu_s[i] = mu_s_[i]; } else { mu_s[i] = mu_s_[i]/(1-g[0]); } } // current device id int dev; // run as many times as requested for (int r = 0; r < runs; r++) { // sample all mu_s for (int j = 0; j < N_mu_s; j++) { for (int d = 0; d < nGPU; d++) { _mu_s[d][0] = mu_s[j]; _mu_s_[d][0] = mu_s_[j]; } // sample all mu_a for (int i = 0; i < N_mu_a; i+=nGPU) { // if T_d and R_d were measured separately, do both configurations at once, if not, then two two samples at once if (!separate) { for (int d = 0; d < nGPU && i+d < N_mu_a; d++) { dev = GPUs[d]; _mu_a[d][0] = mu_a[i+d]; _mu_t[d][0] = _mu_s[d][0] + _mu_a[d][0]; thread_vals[d] = async(launch::async, &ISMLMC, dev, N, 1, n, g, t, bounds, _mu_a[d], _mu_s[d], _mu_t[d], R, R_pw, R_fs, R_fp, R_fd, R_f, R_angle, specular_included, T, T_pw, 
T_fs, T_fp, T_fd, T_f, T_angle); } for (int d = 0; d < nGPU && i+d < N_mu_a; d++) { thrust::tie(A_[d],R_d_[d],R_s_[d],T_d_[d],T_u_[d]) = thread_vals[d].get(); } for (int d = 0; d < nGPU && i+d < N_mu_a; d++) { dev = GPUs[d]; printf("RUN: %2d of %2d | GPU %2d | mua: %8.4f mus: %8.4f | A: %1.6f R_d %1.6f T_d %1.6f\n", r+1, runs, dev, mu_a[i+d], mu_s_[j], A_[d], R_d_[d], T_d_[d]); // save values to arrays if (R || T) { //A[i+d][j] = A_[d]; R_d[i+d][j] = R_d_[d]; //R_s[i+d][j] = R_s_[d]; T_d[i+d][j] = T_d_[d]; //T_u[i+d][j] = T_u_[d]; } else { //A[i+d][j] = A_[d]; R_d[i+d][j] = R_d_[d] + R_s_[d]; //R_s[i+d][j] = R_s_[d]; T_d[i+d][j] = T_d_[d] + T_u_[d]; //T_u[i+d][j] = T_u_[d]; } } } else if (separate) { for (int d = 0; d < nGPU && i+d < N_mu_a; d++) { dev = GPUs[d]; _mu_a[d][0] = mu_a[i+d]; _mu_t[d][0] = _mu_s[d][0] + _mu_a[d][0]; thread_vals[d] = async(launch::async, &ISMLMC, dev, N, 1, n, g, t, bounds, _mu_a[d], _mu_s[d], _mu_t[d], true, R_pw, R_fs, R_fp, R_fd, R_f, R_angle, specular_included, false, T_pw, T_fs, T_fp, T_fd, T_f, T_angle); } for (int d = 0; d < nGPU && i+d < N_mu_a; d++) { thrust::tie(A_[d],R_d_[d],R_s_[d],T_d_[d],T_u_[d]) = thread_vals[d].get(); } for (int d = 0; d < nGPU && i+d < N_mu_a; d++) { dev = GPUs[d]; printf("RUN: %2d of %2d | GPU %2d | R Sphere | mua: %8.4f mus: %8.4f | A: %1.6f R_d %1.6f T_d %1.6f\n", r+1, runs, dev, mu_a[i+d], mu_s_[j], A_[d], R_d_[d], T_d_[d]); // save R_d to array R_d[i+d][j] = R_d_[d]; } for (int d = 0; d < nGPU && i+d < N_mu_a; d++) { dev = GPUs[d]; _mu_a[d][0] = mu_a[i+d]; _mu_t[d][0] = _mu_s[d][0] + _mu_a[d][0]; thread_vals[d] = async(launch::async, &ISMLMC, dev, N, 1, n, g, t, bounds, _mu_a[d], _mu_s[d], _mu_t[d], false, R_pw, R_fs, R_fp, R_fd, R_f, R_angle, specular_included, true, T_pw, T_fs, T_fp, T_fd, T_f, T_angle); } for (int d = 0; d < nGPU&& i+d < N_mu_a; d++) { thrust::tie(A_[d],R_d_[d],R_s_[d],T_d_[d],T_u_[d]) = thread_vals[d].get(); } for (int d = 0; d < nGPU && i+d < N_mu_a; d++) { dev = GPUs[d]; printf("RUN: %2d of %2d | GPU %2d | T Sphere | mua: %8.4f mus: %8.4f | A: %1.6f R_d %1.6f T_d %1.6f\n", r+1, runs, dev, mu_a[i+d], mu_s_[j], A_[d], R_d_[d], T_d_[d]); // save T_d to array T_d[i+d][j] = T_d_[d]; } // save values to arrays //A[i+d][j] = something //R_s[i+d][j] = something //T_u[i+d][j] = something } } // save this run's data for (int i = 0; i < N_mu_a; i++) { for (int j = 0; j < N_mu_s; j++) { //meta_A[r][i][j] = A[i][j]; meta_R_d[r][i][j] = R_d[i][j]; //meta_R_s[r][i][j] = R_s[i][j]; meta_T_d[r][i][j] = T_d[i][j]; //meta_T_u[r][i][j] = T_u[i][j]; } } } } // find averages and standard deviations //A = array_avg(meta_A, runs, N_mu_a, N_mu_s); R_d = array_avg(meta_R_d, runs, N_mu_a, N_mu_s); //R_s = array_avg(meta_R_s, runs, N_mu_a, N_mu_s); T_d = array_avg(meta_T_d, runs, N_mu_a, N_mu_s); //T_u = array_avg(meta_T_u, runs, N_mu_a, N_mu_s); if (poisson) { //A_unc = array_poisson(meta_A, runs, N_mu_a, N_mu_s, N); R_d_unc = array_poisson(meta_R_d, runs, N_mu_a, N_mu_s, N); //R_s_unc = array_poisson(meta_R_s, runs, N_mu_a, N_mu_s, N); T_d_unc = array_poisson(meta_T_d, runs, N_mu_a, N_mu_s, N); //T_u_unc = array_poisson(meta_T_u, runs, N_mu_a, N_mu_s, N); } else { //A_unc = array_stderr(meta_A, runs, N_mu_a, N_mu_s); R_d_unc = array_stderr(meta_R_d, runs, N_mu_a, N_mu_s); //R_s_unc = array_stderr(meta_R_s, runs, N_mu_a, N_mu_s); T_d_unc = array_stderr(meta_T_d, runs, N_mu_a, N_mu_s); //T_u_unc = array_stderr(meta_T_u, runs, N_mu_a, N_mu_s); } // write data in such a way that python and numpy can easily read it and generate a 
lookup table //write_data("Absorptance.csv", "Average Absorptance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, A, true); write_data("Diffuse_Reflectance.csv", "Average Diffuse Reflectance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, R_d, true); //write_data("Specular_Reflectance.csv", "Average Specular Reflectance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, R_s, true); write_data("Diffuse_Transmittance.csv", "Average Diffuse Transmittance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, T_d, true); //write_data("Unscattered_Transmittance.csv", "Average Unscattered Transmittance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, T_u, true); //write_data("Absorptance.csv", "Standard Error in Absorptance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, A_unc, false); write_data("Diffuse_Reflectance.csv", "Standard Error in Diffuse Reflectance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, R_d_unc, false); //write_data("Specular_Reflectance.csv", "Standard Error in Specular Reflectance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, R_s_unc, false); write_data("Diffuse_Transmittance.csv", "Standard Error in Diffuse Transmittance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, T_d_unc, false); //write_data("Unscattered_Transmittance.csv", "Standard Error in Unscattered Transmittance", N_mu_a, N_mu_s, n[0], g[0], t[0], mu_a, mu_s_, T_u_unc, false); } int main(int argc, char* argv[]) { if (cmdOptionExists(argv, argv+argc, "--help") || cmdOptionExists(argv, argv+argc, "-h")) { cout << "\n---------------------------------------------------------------------------------------\n"; cout << "\nIntegrating Sphere Monte Carlo Lookup Table Data Generator\n"; cout << "\nWritten by Patrick Cook | Fort Hays State University | 4 May 2019\n"; cout << "[email protected] or [email protected]\n"; cout << "\nTo be used in conjuction with the provided python script to create lookup tables.\nThis code currently only produces data for diffuse reflectance and diffuse transmittance.\n"; cout << "\n---------------------------------------------------------------------------------------\n"; cout << "\nUseful Flags:\n"; cout << "\n--help - Shows this help text.\n"; cout << "\n-h - Same as --help.\n"; cout << "\n--specularIncluded\n - Changes the incident beam to 8deg from the normal to include\n specular reflection in any reflection sphere that may be present.\n"; cout << "\n--separate\n - Run simulations separately to measure diffuse reflectance and diffuse transmittance.\n Usually enabled when using a single sphere to measure R_d and T_d.\n Disable for dual-sphere experiments.\n >>> If separate is enabled then ALL of --Rsphere\n --Rangle --Tsphere and --Tangle must be specified for accurate results. <<<"; cout << "\n--example\n - Show an example configuration and run it.\n Makes all required parameters except --GPU optional.\n Useful for ensuring your installation works.\n"; cout << "\n--go - Just run the simulation. Don't ask to confirm.\n"; cout << "\n---------------------------------------------------------------------------------------\n"; cout << "\nRequired Parameters:\n"; cout << "\n--GPU - Device IDs for GPUs to run on.\n Use the 'nvidia-smi' command to list available GPUs.\n Mutliple GPU IDs must be in quotes.\n Example: --GPU \"0 2\"\n"; cout << "\n--unc - Uncertainty threshold for all measured parameters. 
The number of photons will be\n calculated such that this uncertainty should be reached.\n"; cout << "\n-n - Relative refractive index (relative to the surrounding medium) to use for all samples.\n"; cout << "\n-g - Anisotropy of all samples.\n"; cout << "\n-t - Thickness of all samples in centimeters.\n"; cout << "\n--Nmua - Total number of absorption coefficients.\n"; cout << "\n--muaS - Starting value for the absorption coefficients in 1/cm.\n"; cout << "\n--muaB - Base of the geometric range for the absorption coefficients.\n"; cout << " Absorption coefficients will be generated with mua[i] = muaS*muaB^i for 0 <= i < Nmua.\n"; cout << "\n--Nmus - Total number of REDUCED scattering coefficients.\n"; cout << "\n--musS - Starting value for the REDUCED scattering coefficients in 1/cm.\n"; cout << "\n--musB - Base of the geometric range for the REDUCED scattering coefficients.\n"; cout << " REDUCED scattering coeffs will be generated with mus[i] = musS*musB^i for 0 <= i < Nmus.\n"; cout << " To convert from the reduced scattering coefficient, mus', to the scattering coefficient:\n"; cout << " mus = mus'/(1-g) for g!=1 and mus = mus' if g = 1.\n"; cout << "\n\nOptional Parameters:\n"; cout << "\n--est - Estimate of largest value to be measured. Will reduce number of photons\n necessary to reach certain error.\n Example: --est 0.75\n"; cout << "\n--Rsphere\n - Parameters of the sphere measuring reflectance. Must be in quotes and\n in the following order: pw fs fp fd\n - pw is the reflectance of the inner wall\n - fs is the sample port fractional area\n - fp is the source port fractional area.\n - fd is the detector fractional area\n Example: --Rsphere \"0.99 0.1 0.1 0.2\"\n >>> If --Rsphere is not specified then Rangle MUST be. <<<\n"; cout << "\n--Rangle\n - Angle threshold in degrees for what counts as specular\n reflectance when there is no reflection sphere present.\n"; cout << "\n--Tsphere\n - Parameters of the sphere measuring transmittance. Must be in quotes and\n in the following order: pw fs fp fd\n - pw is the reflectance of the inner wall\n - fs is the sample/source port fractional area\n - fp is the optional port fractional area.\n - fd is the detector fractional area\n Example: --Tsphere \"0.99 0.1 0.1 0.2\"\n >>> If --Tsphere is not specified then Tangle MUST be. <<<\n"; cout << "\n--Tangle\n - Angle threshold in degrees for what counts as unscattered\n transmittance when there is no transmission sphere present.\n"; cout << "\n-N - Can be used to override --unc. Must be used in conjuction with --runs.\n Number of photons to use per simulation.\n"; cout << "\n--runs - Can be used to override --unc. 
Must be used in conjunction with -N.\n Number of times to run each simulation.\n Results will report the average and standard error of these runs.\n";
    cout << "\n---------------------------------------------------------------------------------------\n";
    cout << "\nNotes:\n";
    cout << "If you are getting 'out of memory' errors, reduce N or change the RNGS variable in the source\ncode to something smaller.\n\n";
    return 1;
    }
    if (!cmdOptionExists(argv, argv+argc, "--GPU")) {
        cout << "Must specify device IDs with the --GPU flag.\nUse --help to see available options.\n";
        return 2;
    }
    srand(time(NULL));

    //// This block finds the number of GPUs and declares variables for CURAND for each of them ////
    int nGPU = 0;
    char* GPU = getCmdOption(argv, argv+argc, "--GPU");
    string GPUstr(GPU);
    stringstream GPUss1(GPUstr);
    int temp1;
    while (GPUss1 >> temp1) { nGPU++; }
    stringstream GPUss2(GPUstr);
    int temp2;
    int* GPUs = new int[nGPU];
    int fGPU = 0;
    for (int i = 0; GPUss2 >> temp2; i++) {
        GPUs[i] = temp2;
        if (GPUs[i] > fGPU) { fGPU = GPUs[i]; }
        cout << "\n";
        CUDABasicProperties(GPUs[i]);
    }
    cout << "\n";
    fGPU++;
    rand_set = new bool[fGPU]();
    globalDeviceStates = new curandState*[fGPU];
    //// //// //// //// //// //// //// //// //// ////

    bool go = cmdOptionExists(argv, argv+argc, "--go");

    // placeholder parameters that will persist if the user doesn't set them
    //====Sphere Parameters====//
    bool R = false; // boolean to determine if the reflection sphere is present or not
    float R_pw = nanf("0"); // reflectance of reflection sphere
    float R_fs = nanf("0"); // sample port fraction of reflection sphere
    float R_fp = nanf("0"); // source port fraction of reflection sphere
    float R_fd = nanf("0"); // detector fraction of reflection sphere
    float R_f = nanf("0"); // total port fraction of reflection sphere
    float R_angle = nanf("0"); // angle threshold for specular reflection if no Rsphere is present
    bool specular_included = false; // boolean to determine if specular reflection is included or not
    bool T = false; // boolean to determine if the transmission sphere is present or not
    float T_pw = nanf("0"); // reflectance of transmission sphere
    float T_fs = nanf("0"); // sample port fraction of transmission sphere
    float T_fp = nanf("0"); // optional port fraction of transmission sphere
    float T_fd = nanf("0"); // detector fraction of transmission sphere
    float T_f = nanf("0"); // total port fraction of transmission sphere
    float T_angle = nanf("0"); // angle threshold for direct transmission if no Tsphere is present
    //========//
    int N = -1;
    int runs = -1;
    float uncertainty = 0;
    float estimate = 1;
    bool separate = false; // measure R_d and T_d separately if true
    float n[1] = {nanf("0")};
    float g[1] = {nanf("0")};
    float t[1] = {nanf("0")};
    int N_mu_a = -1;
    float mu_a_start = nanf("0");
    float mu_a_base = nanf("0");
    int N_mu_s = -1;
    float mu_s__start = nanf("0");
    float mu_s__base = nanf("0");

    // example parameters
    if (cmdOptionExists(argv, argv+argc, "--example")) {
        //====Sphere Parameters====//
        R = true; // boolean to determine if the reflection sphere is present or not
        R_pw = 0.99; // reflectance of reflection sphere
        R_fs = 0.023510; // sample port fraction of reflection sphere
        R_fp = 0.054638; // source port fraction of reflection sphere
        R_fd = 0.023510; // detector fraction of reflection sphere
        R_f = R_fs + R_fp + R_fd; // total port fraction of reflection sphere
        R_angle = 0; // angle threshold for specular reflection if no Rsphere is present
        specular_included = false; // boolean to determine if specular reflection is included or not
        T = true; // boolean to determine if the transmission sphere is present or not
        T_pw = 0.99; // reflectance of transmission sphere
        T_fs = 0.054638; // sample port fraction of transmission sphere
        T_fp = 0.023510; // optional port fraction of transmission sphere
        T_fd = 0.023510; // detector fraction of transmission sphere
        T_f = T_fs + T_fp + T_fd; // total port fraction of transmission sphere
        T_angle = 0.22*M_PI/180; // angle threshold for direct transmission if no Tsphere is present
        //========//
        N = 100000;
        runs = 10;
        separate = true;
        n[0] = 1.4;
        g[0] = 0.5;
        t[0] = 0.135;
        N_mu_a = 13;
        mu_a_start = 0.5;
        mu_a_base = 1.2;
        N_mu_s = 11;
        mu_s__start = 10;
        mu_s__base = 1.2;
        cout << "\n---------------------------------------------------------------------------------------\n";
        cout << "\nEXAMPLE LOOKUP TABLE SIMULATION\n";
        printf("\nParameters (as found in Cook et al 2019):\n\n-N %d\n--runs %d\n-n %f\n-g %f\n-t %f\n--Nmua %d\n--muaS %f\n--muaB %f\n--Nmus %d\n--musS %f\n--musB %f\n--Rsphere \"%f %f %f %f\"\n--Rangle %f\n--Tsphere \"%f %f %f %f\"\n--Tangle %f\n--separate\n", N, runs, n[0], g[0], t[0], N_mu_a, mu_a_start, mu_a_base, N_mu_s, mu_s__start, mu_s__base, R_pw, R_fs, R_fp, R_fd, R_angle*180/M_PI, T_pw, T_fs, T_fp, T_fd, T_angle*180/M_PI);
        if (!go) {
            cout <<"\nPress [enter] to start or Ctrl+C to cancel.";
            getchar();
        }
    }
    // user specified parameters parsed from command line
    else {
        if ((cmdOptionExists(argv, argv+argc, "-N") != cmdOptionExists(argv, argv+argc, "--runs")) && !(cmdOptionExists(argv, argv+argc, "--unc"))) {
            cout << "-N and --runs must be used in conjunction, not by themselves.\n";
            return 2;
        }
        if ((cmdOptionExists(argv, argv+argc, "-N") || cmdOptionExists(argv, argv+argc, "--runs")) && (cmdOptionExists(argv, argv+argc, "--unc") || cmdOptionExists(argv, argv+argc, "--est"))) {
            cout << "-N and --runs cannot be used with --unc or --est\n";
            return 2;
        }
        //====Sphere Parameters====//
        R = cmdOptionExists(argv, argv+argc, "--Rsphere"); // boolean to determine if the reflection sphere is present or not
        float* R_params = new float[4];
        for (int i = 0; i < 4; i++) { R_params[i] = nanf("1"); }
        if (R) { R_params = readArrOption(argv, argv+argc, "--Rsphere", 4); }
        R_pw = R_params[0]; // reflectance of reflection sphere
        R_fs = R_params[1]; // sample port fraction of reflection sphere
        R_fp = R_params[2]; // source port fraction of reflection sphere
        R_fd = R_params[3]; // detector fraction of reflection sphere
        R_f = R_fs + R_fp + R_fd; // total port fraction of reflection sphere
        if (cmdOptionExists(argv, argv+argc, "--Rangle")){ R_angle = readFloatOption(argv, argv+argc, "--Rangle")*M_PI/180; } // angle threshold for specular reflection if no Rsphere is present
        else { R_angle = 0; }
        specular_included = cmdOptionExists(argv, argv+argc, "--specularIncluded"); // boolean to determine if specular reflection is included or not
        T = cmdOptionExists(argv, argv+argc, "--Tsphere"); // boolean to determine if the transmission sphere is present or not
        float* T_params = new float[4];
        for (int i = 0; i < 4; i++) { T_params[i] = nanf("1"); }
        if (T) { T_params = readArrOption(argv, argv+argc, "--Tsphere", 4); }
        T_pw = T_params[0]; // reflectance of transmission sphere
        T_fs = T_params[1]; // sample port fraction of transmission sphere
        T_fp = T_params[2]; // optional port fraction of transmission sphere
        T_fd = T_params[3]; // detector fraction of transmission sphere
        T_f = T_fs + T_fp + T_fd; // total port fraction of transmission sphere
        if (cmdOptionExists(argv, argv+argc, "--Tangle")){ T_angle = readFloatOption(argv, argv+argc, "--Tangle")*M_PI/180; } // angle threshold for direct transmission if no Tsphere is present
        else { T_angle = 0; }
        //========//
        if (cmdOptionExists(argv, argv+argc, "-N") && cmdOptionExists(argv, argv+argc, "--runs")) {
            N = readIntOption(argv, argv+argc, "-N");
            runs = readIntOption(argv, argv+argc, "--runs");
        }
        else if (cmdOptionExists(argv, argv+argc, "--unc")) {
            runs = 0;
            if (cmdOptionExists(argv, argv+argc, "--est")) { estimate = readFloatOption(argv, argv+argc, "--est"); }
            uncertainty = readFloatOption(argv, argv+argc, "--unc");
            N = (int)ceil(estimate/(uncertainty*uncertainty));
        }
        separate = cmdOptionExists(argv, argv+argc, "--separate");
        n[0] = readFloatOption(argv, argv+argc, "-n");
        g[0] = readFloatOption(argv, argv+argc, "-g");
        t[0] = readFloatOption(argv, argv+argc, "-t");
        N_mu_a = readIntOption(argv, argv+argc, "--Nmua");
        mu_a_start = readFloatOption(argv, argv+argc, "--muaS");
        mu_a_base = readFloatOption(argv, argv+argc, "--muaB");
        N_mu_s = readIntOption(argv, argv+argc, "--Nmus");
        mu_s__start = readFloatOption(argv, argv+argc, "--musS");
        mu_s__base = readFloatOption(argv, argv+argc, "--musB");
        cout << "\n---------------------------------------------------------------------------------------\n";
        cout << "\nParameters:\n";
        if ( cmdOptionExists(argv, argc+argv, "--unc") ) { printf("\n--unc %f", uncertainty); }
        if ( cmdOptionExists(argv, argc+argv, "--est") ) { printf("\n--est %f", estimate); }
        if ( cmdOptionExists(argv, argc+argv, "-N") ) { printf("\n-N %d\n--runs %d", N, runs); }
        else { printf("\n-N %d (calculated)", N); }
        printf("\n-n %f\n-g %f\n-t %f\n--Nmua %d\n--muaS %f\n--muaB %f\n--Nmus %d\n--musS %f\n--musB %f\n--Rsphere \"%f %f %f %f\"\n--Rangle %f\n--Tsphere \"%f %f %f %f\"\n--Tangle %f\n", n[0], g[0], t[0], N_mu_a, mu_a_start, mu_a_base, N_mu_s, mu_s__start, mu_s__base, R_pw, R_fs, R_fp, R_fd, R_angle*180/M_PI, T_pw, T_fs, T_fp, T_fd, T_angle*180/M_PI);
        if (!go) {
            cout <<"\nPress [enter] to start or Ctrl+C to cancel.\n";
            getchar();
        }
    }

    // calculate the arrays that store the entire space of mua and mus'
    float* mu_a = new float[N_mu_a];
    float* mu_s_= new float[N_mu_s];
    for (int i = 0; i < N_mu_a; i++) { mu_a[i] = mu_a_start*pow(mu_a_base,i); }
    for (int i = 0; i < N_mu_s; i++) { mu_s_[i] = mu_s__start*pow(mu_s__base,i); }
    float* mu_s = new float[N_mu_s];
    for (int i = 0; i < N_mu_s; i++) {
        if (g[0] == 1) { mu_s[i] = mu_s_[i]; }
        else { mu_s[i] = mu_s_[i]/(1-g[0]); }
    }

    int layers = 1;

    // define layer boundaries
    float* bounds = new float[layers+1];
    bounds[0] = 0;
    for( int i = 1; i < layers+1; i++ ) { bounds[i] = bounds[i-1] + t[i-1]; }

    // generate the data for the lookup table
    singleLayerSampleSpace(GPUs, nGPU, N, runs, N_mu_a, N_mu_s, n, g, t, bounds, mu_a, mu_s_, R, R_pw, R_fs, R_fp, R_fd, R_f, R_angle, specular_included, T, T_pw, T_fs, T_fp, T_fd, T_f, T_angle, separate);
    return 0;
}
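// --- Illustrative sketch (hypothetical standalone file, not part of the simulator above) ---
// The lookup-table program above picks its photon count either directly (-N) or from a
// target uncertainty via N = ceil(estimate / unc^2), and it spans the optical-property
// space with geometric grids mu_a[i] = muaS * muaB^i and mu_s_[i] = musS * musB^i.
// This minimal host-only program reproduces just that arithmetic so the photon budget and
// grid spacing can be sanity-checked; the constants are the --example preset values and
// every name here is local to the sketch.
#include <cstdio>
#include <cmath>

int main() {
    const float uncertainty = 0.001f;                    // e.g. --unc 0.001
    const float estimate = 1.0f;                         // default --est
    const int N = (int)std::ceil(estimate / (uncertainty * uncertainty));
    std::printf("photons per simulation: %d\n", N);      // 1000000 for these inputs

    const int N_mu_a = 13, N_mu_s = 11;                  // --Nmua / --Nmus
    const float mu_a_start = 0.5f, mu_a_base = 1.2f;     // --muaS / --muaB
    const float mu_s__start = 10.0f, mu_s__base = 1.2f;  // --musS / --musB
    for (int i = 0; i < N_mu_a; i++)
        std::printf("mu_a[%2d]  = %f\n", i, mu_a_start * std::pow(mu_a_base, i));
    for (int i = 0; i < N_mu_s; i++)
        std::printf("mu_s_[%2d] = %f\n", i, mu_s__start * std::pow(mu_s__base, i));
    return 0;
}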
b16bd8ad30e5ca7f83fa253f5726e2df49399bf8.hip
// !!! This is a file automatically generated by hipify!!! #include "file_system.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> __device__ void user_program(FileSystem *fs, uchar *input, uchar *output) { /////////////// Test Case 1 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input, 64, fp); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_READ); fs_read(fs, output, 32, fp); fs_gsys(fs,LS_D); fs_gsys(fs, LS_S); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 64, 12, fp); fs_gsys(fs, LS_S); fs_gsys(fs, LS_D); fs_gsys(fs, RM, "t.txt\0"); fs_gsys(fs, LS_S); /* /////////////// Test Case 2 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs,input, 64, fp); fp = fs_open(fs,"b.txt\0", G_WRITE); fs_write(fs,input + 32, 32, fp); fp = fs_open(fs,"t.txt\0", G_WRITE); fs_write(fs,input + 32, 32, fp); fp = fs_open(fs,"t.txt\0", G_READ); fs_read(fs,output, 32, fp); fs_gsys(fs,LS_D); fs_gsys(fs,LS_S); fp = fs_open(fs,"b.txt\0", G_WRITE); fs_write(fs,input + 64, 12, fp); fs_gsys(fs,LS_S); fs_gsys(fs,LS_D); fs_gsys(fs,RM, "t.txt\0"); fs_gsys(fs,LS_S); char fname[10][20]; for (int i = 0; i < 10; i++) { fname[i][0] = i + 33; for (int j = 1; j < 19; j++) fname[i][j] = 64 + j; fname[i][19] = '\0'; } for (int i = 0; i < 10; i++) { fp = fs_open(fs,fname[i], G_WRITE); fs_write(fs,input + i, 24 + i, fp); } fs_gsys(fs,LS_S); for (int i = 0; i < 5; i++) fs_gsys(fs,RM, fname[i]); fs_gsys(fs,LS_D); /////////////// Test Case 3 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input, 64, fp); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_READ); fs_read(fs, output, 32, fp); fs_gsys(fs, LS_D); fs_gsys(fs, LS_S); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 64, 12, fp); fs_gsys(fs, LS_S); fs_gsys(fs, LS_D); fs_gsys(fs, RM, "t.txt\0"); fs_gsys(fs, LS_S); char fname[10][20]; for (int i = 0; i < 10; i++) { fname[i][0] = i + 33; for (int j = 1; j < 19; j++) fname[i][j] = 64 + j; fname[i][19] = '\0'; } for (int i = 0; i < 10; i++) { fp = fs_open(fs, fname[i], G_WRITE); fs_write(fs, input + i, 24 + i, fp); } fs_gsys(fs, LS_S); for (int i = 0; i < 5; i++) fs_gsys(fs, RM, fname[i]); fs_gsys(fs, LS_D); char fname2[1018][20]; int p = 0; for (int k = 2; k < 15; k++) for (int i = 50; i <= 126; i++, p++) { fname2[p][0] = i; for (int j = 1; j < k; j++) fname2[p][j] = 64 + j; fname2[p][k] = '\0'; } for (int i = 0; i < 1001; i++) { fp = fs_open(fs, fname2[i], G_WRITE); fs_write(fs, input + i, 24 + i, fp); } fs_gsys(fs, LS_S); fp = fs_open(fs, fname2[1000], G_READ); fs_read(fs, output + 1000, 1024, fp); char fname3[17][3]; for (int i = 0; i < 17; i++) { fname3[i][0] = 97 + i; fname3[i][1] = 97 + i; fname3[i][2] = '\0'; fp = fs_open(fs, fname3[i], G_WRITE); fs_write(fs, input + 1024 * i, 1024, fp); } fp = fs_open(fs, "EA\0", G_WRITE); fs_write(fs, input + 1024 * 100, 1024, fp); fs_gsys(fs, LS_S); */ }
b16bd8ad30e5ca7f83fa253f5726e2df49399bf8.cu
#include "file_system.h" #include <cuda.h> #include <cuda_runtime.h> __device__ void user_program(FileSystem *fs, uchar *input, uchar *output) { /////////////// Test Case 1 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input, 64, fp); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_READ); fs_read(fs, output, 32, fp); fs_gsys(fs,LS_D); fs_gsys(fs, LS_S); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 64, 12, fp); fs_gsys(fs, LS_S); fs_gsys(fs, LS_D); fs_gsys(fs, RM, "t.txt\0"); fs_gsys(fs, LS_S); /* /////////////// Test Case 2 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs,input, 64, fp); fp = fs_open(fs,"b.txt\0", G_WRITE); fs_write(fs,input + 32, 32, fp); fp = fs_open(fs,"t.txt\0", G_WRITE); fs_write(fs,input + 32, 32, fp); fp = fs_open(fs,"t.txt\0", G_READ); fs_read(fs,output, 32, fp); fs_gsys(fs,LS_D); fs_gsys(fs,LS_S); fp = fs_open(fs,"b.txt\0", G_WRITE); fs_write(fs,input + 64, 12, fp); fs_gsys(fs,LS_S); fs_gsys(fs,LS_D); fs_gsys(fs,RM, "t.txt\0"); fs_gsys(fs,LS_S); char fname[10][20]; for (int i = 0; i < 10; i++) { fname[i][0] = i + 33; for (int j = 1; j < 19; j++) fname[i][j] = 64 + j; fname[i][19] = '\0'; } for (int i = 0; i < 10; i++) { fp = fs_open(fs,fname[i], G_WRITE); fs_write(fs,input + i, 24 + i, fp); } fs_gsys(fs,LS_S); for (int i = 0; i < 5; i++) fs_gsys(fs,RM, fname[i]); fs_gsys(fs,LS_D); /////////////// Test Case 3 /////////////// u32 fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input, 64, fp); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_WRITE); fs_write(fs, input + 32, 32, fp); fp = fs_open(fs, "t.txt\0", G_READ); fs_read(fs, output, 32, fp); fs_gsys(fs, LS_D); fs_gsys(fs, LS_S); fp = fs_open(fs, "b.txt\0", G_WRITE); fs_write(fs, input + 64, 12, fp); fs_gsys(fs, LS_S); fs_gsys(fs, LS_D); fs_gsys(fs, RM, "t.txt\0"); fs_gsys(fs, LS_S); char fname[10][20]; for (int i = 0; i < 10; i++) { fname[i][0] = i + 33; for (int j = 1; j < 19; j++) fname[i][j] = 64 + j; fname[i][19] = '\0'; } for (int i = 0; i < 10; i++) { fp = fs_open(fs, fname[i], G_WRITE); fs_write(fs, input + i, 24 + i, fp); } fs_gsys(fs, LS_S); for (int i = 0; i < 5; i++) fs_gsys(fs, RM, fname[i]); fs_gsys(fs, LS_D); char fname2[1018][20]; int p = 0; for (int k = 2; k < 15; k++) for (int i = 50; i <= 126; i++, p++) { fname2[p][0] = i; for (int j = 1; j < k; j++) fname2[p][j] = 64 + j; fname2[p][k] = '\0'; } for (int i = 0; i < 1001; i++) { fp = fs_open(fs, fname2[i], G_WRITE); fs_write(fs, input + i, 24 + i, fp); } fs_gsys(fs, LS_S); fp = fs_open(fs, fname2[1000], G_READ); fs_read(fs, output + 1000, 1024, fp); char fname3[17][3]; for (int i = 0; i < 17; i++) { fname3[i][0] = 97 + i; fname3[i][1] = 97 + i; fname3[i][2] = '\0'; fp = fs_open(fs, fname3[i], G_WRITE); fs_write(fs, input + 1024 * i, 1024, fp); } fp = fs_open(fs, "EA\0", G_WRITE); fs_write(fs, input + 1024 * 100, 1024, fp); fs_gsys(fs, LS_S); */ }
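// --- Illustrative note on the commented-out test cases above (not part of the original file) ---
// Test Cases 2 and 3 build their file names with fname[i][0] = i + 33 and fname[i][j] = 64 + j,
// i.e. one printable ASCII character starting at '!' followed by the letters 'A'..'R', and each
// file i is written with 24 + i bytes. The host-only snippet below reproduces that generator so
// the names and sizes expected in the LS_S / LS_D listings can be read off without running the
// GPU file system; it assumes nothing about the FileSystem API itself.
#include <cstdio>

int main() {
    char fname[10][20];
    for (int i = 0; i < 10; i++) {
        fname[i][0] = (char)(i + 33);                               // '!', '"', '#', '$', ...
        for (int j = 1; j < 19; j++) fname[i][j] = (char)(64 + j);  // 'A' .. 'R'
        fname[i][19] = '\0';
        std::printf("file %d: %-20s size %d bytes\n", i, fname[i], 24 + i);
    }
    return 0;
}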
8ed1f395646928e0de86cd2dc7e039216e24ec5d.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_NO_COMPLEX #define EIGEN_TEST_FUNC cxx11_tensor_cuda #define EIGEN_USE_GPU #include "main.h" #include <unsupported/Eigen/CXX11/Tensor> using Eigen::Tensor; void test_cuda_nullary() { Tensor<float, 1, 0, int> in1(2); Tensor<float, 1, 0, int> in2(2); in1.setRandom(); in2.setRandom(); std::size_t tensor_bytes = in1.size() * sizeof(float); float* d_in1; float* d_in2; hipMalloc((void**)(&d_in1), tensor_bytes); hipMalloc((void**)(&d_in2), tensor_bytes); hipMemcpy(d_in1, in1.data(), tensor_bytes, hipMemcpyHostToDevice); hipMemcpy(d_in2, in2.data(), tensor_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_in1( d_in1, 2); Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_in2( d_in2, 2); gpu_in1.device(gpu_device) = gpu_in1.constant(3.14f); gpu_in2.device(gpu_device) = gpu_in2.random(); Tensor<float, 1, 0, int> new1(2); Tensor<float, 1, 0, int> new2(2); assert(hipMemcpyAsync(new1.data(), d_in1, tensor_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipMemcpyAsync(new2.data(), d_in2, tensor_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 2; ++i) { VERIFY_IS_APPROX(new1(i), 3.14f); VERIFY_IS_NOT_EQUAL(new2(i), in2(i)); } hipFree(d_in1); hipFree(d_in2); } void test_cuda_elementwise_small() { Tensor<float, 1> in1(Eigen::array<Eigen::DenseIndex, 1>(2)); Tensor<float, 1> in2(Eigen::array<Eigen::DenseIndex, 1>(2)); Tensor<float, 1> out(Eigen::array<Eigen::DenseIndex, 1>(2)); in1.setRandom(); in2.setRandom(); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t in2_bytes = in2.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_in1; float* d_in2; float* d_out; hipMalloc((void**)(&d_in1), in1_bytes); hipMalloc((void**)(&d_in2), in2_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_in1, in1.data(), in1_bytes, hipMemcpyHostToDevice); hipMemcpy(d_in2, in2.data(), in2_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in1( d_in1, Eigen::array<Eigen::DenseIndex, 1>(2)); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in2( d_in2, Eigen::array<Eigen::DenseIndex, 1>(2)); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_out( d_out, Eigen::array<Eigen::DenseIndex, 1>(2)); gpu_out.device(gpu_device) = gpu_in1 + gpu_in2; assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 2; ++i) { VERIFY_IS_APPROX( out(Eigen::array<Eigen::DenseIndex, 1>(i)), in1(Eigen::array<Eigen::DenseIndex, 1>(i)) + in2(Eigen::array<Eigen::DenseIndex, 1>(i))); } hipFree(d_in1); hipFree(d_in2); hipFree(d_out); } void test_cuda_elementwise() { Tensor<float, 3> in1(Eigen::array<Eigen::DenseIndex, 
3>(72,53,97)); Tensor<float, 3> in2(Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Tensor<float, 3> in3(Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Tensor<float, 3> out(Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); in1.setRandom(); in2.setRandom(); in3.setRandom(); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t in2_bytes = in2.size() * sizeof(float); std::size_t in3_bytes = in3.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_in1; float* d_in2; float* d_in3; float* d_out; hipMalloc((void**)(&d_in1), in1_bytes); hipMalloc((void**)(&d_in2), in2_bytes); hipMalloc((void**)(&d_in3), in3_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_in1, in1.data(), in1_bytes, hipMemcpyHostToDevice); hipMemcpy(d_in2, in2.data(), in2_bytes, hipMemcpyHostToDevice); hipMemcpy(d_in3, in3.data(), in3_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in1(d_in1, Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in2(d_in2, Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in3(d_in3, Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_out(d_out, Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); gpu_out.device(gpu_device) = gpu_in1 + gpu_in2 * gpu_in3; assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 53; ++j) { for (int k = 0; k < 97; ++k) { VERIFY_IS_APPROX(out(Eigen::array<Eigen::DenseIndex, 3>(i,j,k)), in1(Eigen::array<Eigen::DenseIndex, 3>(i,j,k)) + in2(Eigen::array<Eigen::DenseIndex, 3>(i,j,k)) * in3(Eigen::array<Eigen::DenseIndex, 3>(i,j,k))); } } } hipFree(d_in1); hipFree(d_in2); hipFree(d_in3); hipFree(d_out); } void test_cuda_props() { Tensor<float, 1> in1(200); Tensor<bool, 1> out(200); in1.setRandom(); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(bool); float* d_in1; bool* d_out; hipMalloc((void**)(&d_in1), in1_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_in1, in1.data(), in1_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in1( d_in1, 200); Eigen::TensorMap<Eigen::Tensor<bool, 1>, Eigen::Aligned> gpu_out( d_out, 200); gpu_out.device(gpu_device) = (gpu_in1.isnan)(); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 200; ++i) { VERIFY_IS_EQUAL(out(i), (std::isnan)(in1(i))); } hipFree(d_in1); hipFree(d_out); } void test_cuda_reduction() { Tensor<float, 4> in1(72,53,97,113); Tensor<float, 2> out(72,97); in1.setRandom(); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_in1; float* d_out; hipMalloc((void**)(&d_in1), in1_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_in1, in1.data(), in1_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4> > gpu_in1(d_in1, 72,53,97,113); Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97); array<Eigen::DenseIndex, 2> 
reduction_axis; reduction_axis[0] = 1; reduction_axis[1] = 3; gpu_out.device(gpu_device) = gpu_in1.maximum(reduction_axis); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 97; ++j) { float expected = 0; for (int k = 0; k < 53; ++k) { for (int l = 0; l < 113; ++l) { expected = std::max<float>(expected, in1(i, k, j, l)); } } VERIFY_IS_APPROX(out(i,j), expected); } } hipFree(d_in1); hipFree(d_out); } template<int DataLayout> void test_cuda_contraction() { // with these dimensions, the output has 300 * 140 elements, which is // more than 30 * 1024, which is the number of threads in blocks on // a 15 SM GK110 GPU Tensor<float, 4, DataLayout> t_left(6, 50, 3, 31); Tensor<float, 5, DataLayout> t_right(Eigen::array<Eigen::DenseIndex, 5>(3, 31, 7, 20, 1)); Tensor<float, 5, DataLayout> t_result(Eigen::array<Eigen::DenseIndex, 5>(6, 50, 7, 20, 1)); t_left.setRandom(); t_right.setRandom(); std::size_t t_left_bytes = t_left.size() * sizeof(float); std::size_t t_right_bytes = t_right.size() * sizeof(float); std::size_t t_result_bytes = t_result.size() * sizeof(float); float* d_t_left; float* d_t_right; float* d_t_result; hipMalloc((void**)(&d_t_left), t_left_bytes); hipMalloc((void**)(&d_t_right), t_right_bytes); hipMalloc((void**)(&d_t_result), t_result_bytes); hipMemcpy(d_t_left, t_left.data(), t_left_bytes, hipMemcpyHostToDevice); hipMemcpy(d_t_right, t_right.data(), t_right_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_t_left(d_t_left, 6, 50, 3, 31); Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_t_right(d_t_right, 3, 31, 7, 20, 1); Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_t_result(d_t_result, 6, 50, 7, 20, 1); typedef Eigen::Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> > MapXf; MapXf m_left(t_left.data(), 300, 93); MapXf m_right(t_right.data(), 93, 140); Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result(300, 140); typedef Tensor<float, 1>::DimensionPair DimPair; Eigen::array<DimPair, 2> dims; dims[0] = DimPair(2, 0); dims[1] = DimPair(3, 1); m_result = m_left * m_right; gpu_t_result.device(gpu_device) = gpu_t_left.contract(gpu_t_right, dims); hipMemcpy(t_result.data(), d_t_result, t_result_bytes, hipMemcpyDeviceToHost); for (DenseIndex i = 0; i < t_result.size(); i++) { if (fabs(t_result.data()[i] - m_result.data()[i]) >= 1e-4f) { std::cout << "mismatch detected at index " << i << ": " << t_result.data()[i] << " vs " << m_result.data()[i] << std::endl; assert(false); } } hipFree(d_t_left); hipFree(d_t_right); hipFree(d_t_result); } template<int DataLayout> void test_cuda_convolution_1d() { Tensor<float, 4, DataLayout> input(74,37,11,137); Tensor<float, 1, DataLayout> kernel(4); Tensor<float, 4, DataLayout> out(74,34,11,137); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; hipMalloc((void**)(&d_input), input_bytes); hipMalloc((void**)(&d_kernel), kernel_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_input, input.data(), input_bytes, hipMemcpyHostToDevice); hipMemcpy(d_kernel, kernel.data(), 
kernel_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_input(d_input, 74,37,11,137); Eigen::TensorMap<Eigen::Tensor<float, 1, DataLayout> > gpu_kernel(d_kernel, 4); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_out(d_out, 74,34,11,137); Eigen::array<Eigen::DenseIndex, 1> dims(1); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 74; ++i) { for (int j = 0; j < 34; ++j) { for (int k = 0; k < 11; ++k) { for (int l = 0; l < 137; ++l) { const float result = out(i,j,k,l); const float expected = input(i,j+0,k,l) * kernel(0) + input(i,j+1,k,l) * kernel(1) + input(i,j+2,k,l) * kernel(2) + input(i,j+3,k,l) * kernel(3); VERIFY_IS_APPROX(result, expected); } } } } hipFree(d_input); hipFree(d_kernel); hipFree(d_out); } void test_cuda_convolution_inner_dim_col_major_1d() { Tensor<float, 4, ColMajor> input(74,9,11,7); Tensor<float, 1, ColMajor> kernel(4); Tensor<float, 4, ColMajor> out(71,9,11,7); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; hipMalloc((void**)(&d_input), input_bytes); hipMalloc((void**)(&d_kernel), kernel_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_input, input.data(), input_bytes, hipMemcpyHostToDevice); hipMemcpy(d_kernel, kernel.data(), kernel_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, ColMajor> > gpu_input(d_input,74,9,11,7); Eigen::TensorMap<Eigen::Tensor<float, 1, ColMajor> > gpu_kernel(d_kernel,4); Eigen::TensorMap<Eigen::Tensor<float, 4, ColMajor> > gpu_out(d_out,71,9,11,7); Eigen::array<Eigen::DenseIndex, 1> dims(0); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 71; ++i) { for (int j = 0; j < 9; ++j) { for (int k = 0; k < 11; ++k) { for (int l = 0; l < 7; ++l) { const float result = out(i,j,k,l); const float expected = input(i+0,j,k,l) * kernel(0) + input(i+1,j,k,l) * kernel(1) + input(i+2,j,k,l) * kernel(2) + input(i+3,j,k,l) * kernel(3); VERIFY_IS_APPROX(result, expected); } } } } hipFree(d_input); hipFree(d_kernel); hipFree(d_out); } void test_cuda_convolution_inner_dim_row_major_1d() { Tensor<float, 4, RowMajor> input(7,9,11,74); Tensor<float, 1, RowMajor> kernel(4); Tensor<float, 4, RowMajor> out(7,9,11,71); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; hipMalloc((void**)(&d_input), input_bytes); hipMalloc((void**)(&d_kernel), kernel_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_input, input.data(), input_bytes, hipMemcpyHostToDevice); hipMemcpy(d_kernel, kernel.data(), 
kernel_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, RowMajor> > gpu_input(d_input, 7,9,11,74); Eigen::TensorMap<Eigen::Tensor<float, 1, RowMajor> > gpu_kernel(d_kernel, 4); Eigen::TensorMap<Eigen::Tensor<float, 4, RowMajor> > gpu_out(d_out, 7,9,11,71); Eigen::array<Eigen::DenseIndex, 1> dims(3); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 7; ++i) { for (int j = 0; j < 9; ++j) { for (int k = 0; k < 11; ++k) { for (int l = 0; l < 71; ++l) { const float result = out(i,j,k,l); const float expected = input(i,j,k,l+0) * kernel(0) + input(i,j,k,l+1) * kernel(1) + input(i,j,k,l+2) * kernel(2) + input(i,j,k,l+3) * kernel(3); VERIFY_IS_APPROX(result, expected); } } } } hipFree(d_input); hipFree(d_kernel); hipFree(d_out); } template<int DataLayout> void test_cuda_convolution_2d() { Tensor<float, 4, DataLayout> input(74,37,11,137); Tensor<float, 2, DataLayout> kernel(3,4); Tensor<float, 4, DataLayout> out(74,35,8,137); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; hipMalloc((void**)(&d_input), input_bytes); hipMalloc((void**)(&d_kernel), kernel_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_input, input.data(), input_bytes, hipMemcpyHostToDevice); hipMemcpy(d_kernel, kernel.data(), kernel_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_input(d_input,74,37,11,137); Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> > gpu_kernel(d_kernel,3,4); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_out(d_out,74,35,8,137); Eigen::array<Eigen::DenseIndex, 2> dims(1,2); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 74; ++i) { for (int j = 0; j < 35; ++j) { for (int k = 0; k < 8; ++k) { for (int l = 0; l < 137; ++l) { const float result = out(i,j,k,l); const float expected = input(i,j+0,k+0,l) * kernel(0,0) + input(i,j+1,k+0,l) * kernel(1,0) + input(i,j+2,k+0,l) * kernel(2,0) + input(i,j+0,k+1,l) * kernel(0,1) + input(i,j+1,k+1,l) * kernel(1,1) + input(i,j+2,k+1,l) * kernel(2,1) + input(i,j+0,k+2,l) * kernel(0,2) + input(i,j+1,k+2,l) * kernel(1,2) + input(i,j+2,k+2,l) * kernel(2,2) + input(i,j+0,k+3,l) * kernel(0,3) + input(i,j+1,k+3,l) * kernel(1,3) + input(i,j+2,k+3,l) * kernel(2,3); VERIFY_IS_APPROX(result, expected); } } } } hipFree(d_input); hipFree(d_kernel); hipFree(d_out); } template<int DataLayout> void test_cuda_convolution_3d() { Tensor<float, 5, DataLayout> input(Eigen::array<Eigen::DenseIndex, 5>(74,37,11,137,17)); Tensor<float, 3, DataLayout> kernel(3,4,2); Tensor<float, 5, DataLayout> out(Eigen::array<Eigen::DenseIndex, 5>(74,35,8,136,17)); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * 
sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; hipMalloc((void**)(&d_input), input_bytes); hipMalloc((void**)(&d_kernel), kernel_bytes); hipMalloc((void**)(&d_out), out_bytes); hipMemcpy(d_input, input.data(), input_bytes, hipMemcpyHostToDevice); hipMemcpy(d_kernel, kernel.data(), kernel_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_input(d_input,74,37,11,137,17); Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> > gpu_kernel(d_kernel,3,4,2); Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_out(d_out,74,35,8,136,17); Eigen::array<Eigen::DenseIndex, 3> dims(1,2,3); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(hipMemcpyAsync(out.data(), d_out, out_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 74; ++i) { for (int j = 0; j < 35; ++j) { for (int k = 0; k < 8; ++k) { for (int l = 0; l < 136; ++l) { for (int m = 0; m < 17; ++m) { const float result = out(i,j,k,l,m); const float expected = input(i,j+0,k+0,l+0,m) * kernel(0,0,0) + input(i,j+1,k+0,l+0,m) * kernel(1,0,0) + input(i,j+2,k+0,l+0,m) * kernel(2,0,0) + input(i,j+0,k+1,l+0,m) * kernel(0,1,0) + input(i,j+1,k+1,l+0,m) * kernel(1,1,0) + input(i,j+2,k+1,l+0,m) * kernel(2,1,0) + input(i,j+0,k+2,l+0,m) * kernel(0,2,0) + input(i,j+1,k+2,l+0,m) * kernel(1,2,0) + input(i,j+2,k+2,l+0,m) * kernel(2,2,0) + input(i,j+0,k+3,l+0,m) * kernel(0,3,0) + input(i,j+1,k+3,l+0,m) * kernel(1,3,0) + input(i,j+2,k+3,l+0,m) * kernel(2,3,0) + input(i,j+0,k+0,l+1,m) * kernel(0,0,1) + input(i,j+1,k+0,l+1,m) * kernel(1,0,1) + input(i,j+2,k+0,l+1,m) * kernel(2,0,1) + input(i,j+0,k+1,l+1,m) * kernel(0,1,1) + input(i,j+1,k+1,l+1,m) * kernel(1,1,1) + input(i,j+2,k+1,l+1,m) * kernel(2,1,1) + input(i,j+0,k+2,l+1,m) * kernel(0,2,1) + input(i,j+1,k+2,l+1,m) * kernel(1,2,1) + input(i,j+2,k+2,l+1,m) * kernel(2,2,1) + input(i,j+0,k+3,l+1,m) * kernel(0,3,1) + input(i,j+1,k+3,l+1,m) * kernel(1,3,1) + input(i,j+2,k+3,l+1,m) * kernel(2,3,1); VERIFY_IS_APPROX(result, expected); } } } } } hipFree(d_input); hipFree(d_kernel); hipFree(d_out); } template <typename Scalar> void test_cuda_lgamma(const Scalar stddev) { Tensor<Scalar, 2> in(72,97); in.setRandom(); in *= in.constant(stddev); Tensor<Scalar, 2> out(72,97); out.setZero(); std::size_t bytes = in.size() * sizeof(Scalar); Scalar* d_in; Scalar* d_out; hipMalloc((void**)(&d_in), bytes); hipMalloc((void**)(&d_out), bytes); hipMemcpy(d_in, in.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 72, 97); gpu_out.device(gpu_device) = gpu_in.lgamma(); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 97; ++j) { VERIFY_IS_APPROX(out(i,j), (std::lgamma)(in(i,j))); } } hipFree(d_in); hipFree(d_out); } template <typename Scalar> void test_cuda_digamma() { Tensor<Scalar, 1> in(7); Tensor<Scalar, 1> out(7); Tensor<Scalar, 1> expected_out(7); out.setZero(); in(0) = Scalar(1); in(1) = Scalar(1.5); in(2) = Scalar(4); in(3) 
= Scalar(-10.5); in(4) = Scalar(10000.5); in(5) = Scalar(0); in(6) = Scalar(-1); expected_out(0) = Scalar(-0.5772156649015329); expected_out(1) = Scalar(0.03648997397857645); expected_out(2) = Scalar(1.2561176684318); expected_out(3) = Scalar(2.398239129535781); expected_out(4) = Scalar(9.210340372392849); expected_out(5) = std::numeric_limits<Scalar>::infinity(); expected_out(6) = std::numeric_limits<Scalar>::infinity(); std::size_t bytes = in.size() * sizeof(Scalar); Scalar* d_in; Scalar* d_out; hipMalloc((void**)(&d_in), bytes); hipMalloc((void**)(&d_out), bytes); hipMemcpy(d_in, in.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in(d_in, 7); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 7); gpu_out.device(gpu_device) = gpu_in.digamma(); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 5; ++i) { VERIFY_IS_APPROX(out(i), expected_out(i)); } for (int i = 5; i < 7; ++i) { VERIFY_IS_EQUAL(out(i), expected_out(i)); } hipFree(d_in); hipFree(d_out); } template <typename Scalar> void test_cuda_zeta() { Tensor<Scalar, 1> in_x(6); Tensor<Scalar, 1> in_q(6); Tensor<Scalar, 1> out(6); Tensor<Scalar, 1> expected_out(6); out.setZero(); in_x(0) = Scalar(1); in_x(1) = Scalar(1.5); in_x(2) = Scalar(4); in_x(3) = Scalar(-10.5); in_x(4) = Scalar(10000.5); in_x(5) = Scalar(3); in_q(0) = Scalar(1.2345); in_q(1) = Scalar(2); in_q(2) = Scalar(1.5); in_q(3) = Scalar(3); in_q(4) = Scalar(1.0001); in_q(5) = Scalar(-2.5); expected_out(0) = std::numeric_limits<Scalar>::infinity(); expected_out(1) = Scalar(1.61237534869); expected_out(2) = Scalar(0.234848505667); expected_out(3) = Scalar(1.03086757337e-5); expected_out(4) = Scalar(0.367879440865); expected_out(5) = Scalar(0.054102025820864097); std::size_t bytes = in_x.size() * sizeof(Scalar); Scalar* d_in_x; Scalar* d_in_q; Scalar* d_out; hipMalloc((void**)(&d_in_x), bytes); hipMalloc((void**)(&d_in_q), bytes); hipMalloc((void**)(&d_out), bytes); hipMemcpy(d_in_x, in_x.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(d_in_q, in_q.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_q(d_in_q, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 6); gpu_out.device(gpu_device) = gpu_in_x.zeta(gpu_in_q); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); VERIFY_IS_EQUAL(out(0), expected_out(0)); VERIFY((std::isnan)(out(3))); for (int i = 1; i < 6; ++i) { if (i != 3) { VERIFY_IS_APPROX(out(i), expected_out(i)); } } hipFree(d_in_x); hipFree(d_in_q); hipFree(d_out); } template <typename Scalar> void test_cuda_polygamma() { Tensor<Scalar, 1> in_x(7); Tensor<Scalar, 1> in_n(7); Tensor<Scalar, 1> out(7); Tensor<Scalar, 1> expected_out(7); out.setZero(); in_n(0) = Scalar(1); in_n(1) = Scalar(1); in_n(2) = Scalar(1); in_n(3) = Scalar(17); in_n(4) = Scalar(31); in_n(5) = Scalar(28); in_n(6) = Scalar(8); in_x(0) = Scalar(2); in_x(1) = Scalar(3); in_x(2) = Scalar(25.5); in_x(3) = Scalar(4.7); in_x(4) = Scalar(11.8); in_x(5) = Scalar(17.7); in_x(6) = Scalar(30.2); expected_out(0) = Scalar(0.644934066848); expected_out(1) 
= Scalar(0.394934066848); expected_out(2) = Scalar(0.0399946696496); expected_out(3) = Scalar(293.334565435); expected_out(4) = Scalar(0.445487887616); expected_out(5) = Scalar(-2.47810300902e-07); expected_out(6) = Scalar(-8.29668781082e-09); std::size_t bytes = in_x.size() * sizeof(Scalar); Scalar* d_in_x; Scalar* d_in_n; Scalar* d_out; hipMalloc((void**)(&d_in_x), bytes); hipMalloc((void**)(&d_in_n), bytes); hipMalloc((void**)(&d_out), bytes); hipMemcpy(d_in_x, in_x.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(d_in_n, in_n.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 7); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_n(d_in_n, 7); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 7); gpu_out.device(gpu_device) = gpu_in_n.polygamma(gpu_in_x); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 7; ++i) { VERIFY_IS_APPROX(out(i), expected_out(i)); } hipFree(d_in_x); hipFree(d_in_n); hipFree(d_out); } template <typename Scalar> void test_cuda_igamma() { Tensor<Scalar, 2> a(6, 6); Tensor<Scalar, 2> x(6, 6); Tensor<Scalar, 2> out(6, 6); out.setZero(); Scalar a_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; Scalar x_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; for (int i = 0; i < 6; ++i) { for (int j = 0; j < 6; ++j) { a(i, j) = a_s[i]; x(i, j) = x_s[j]; } } Scalar nan = std::numeric_limits<Scalar>::quiet_NaN(); Scalar igamma_s[][6] = {{0.0, nan, nan, nan, nan, nan}, {0.0, 0.6321205588285578, 0.7768698398515702, 0.9816843611112658, 9.999500016666262e-05, 1.0}, {0.0, 0.4275932955291202, 0.608374823728911, 0.9539882943107686, 7.522076445089201e-07, 1.0}, {0.0, 0.01898815687615381, 0.06564245437845008, 0.5665298796332909, 4.166333347221828e-18, 1.0}, {0.0, 0.9999780593618628, 0.9999899967080838, 0.9999996219837988, 0.9991370418689945, 1.0}, {0.0, 0.0, 0.0, 0.0, 0.0, 0.5042041932513908}}; std::size_t bytes = a.size() * sizeof(Scalar); Scalar* d_a; Scalar* d_x; Scalar* d_out; assert(hipMalloc((void**)(&d_a), bytes) == hipSuccess); assert(hipMalloc((void**)(&d_x), bytes) == hipSuccess); assert(hipMalloc((void**)(&d_out), bytes) == hipSuccess); hipMemcpy(d_a, a.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(d_x, x.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_a(d_a, 6, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_x(d_x, 6, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 6, 6); gpu_out.device(gpu_device) = gpu_a.igamma(gpu_x); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 6; ++i) { for (int j = 0; j < 6; ++j) { if ((std::isnan)(igamma_s[i][j])) { VERIFY((std::isnan)(out(i, j))); } else { VERIFY_IS_APPROX(out(i, j), igamma_s[i][j]); } } } hipFree(d_a); hipFree(d_x); hipFree(d_out); } template <typename Scalar> void test_cuda_igammac() { Tensor<Scalar, 2> a(6, 6); Tensor<Scalar, 2> x(6, 6); Tensor<Scalar, 2> out(6, 6); out.setZero(); Scalar a_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; Scalar x_s[] = {Scalar(0), Scalar(1), 
Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; for (int i = 0; i < 6; ++i) { for (int j = 0; j < 6; ++j) { a(i, j) = a_s[i]; x(i, j) = x_s[j]; } } Scalar nan = std::numeric_limits<Scalar>::quiet_NaN(); Scalar igammac_s[][6] = {{nan, nan, nan, nan, nan, nan}, {1.0, 0.36787944117144233, 0.22313016014842982, 0.018315638888734182, 0.9999000049998333, 0.0}, {1.0, 0.5724067044708798, 0.3916251762710878, 0.04601170568923136, 0.9999992477923555, 0.0}, {1.0, 0.9810118431238462, 0.9343575456215499, 0.4334701203667089, 1.0, 0.0}, {1.0, 2.1940638138146658e-05, 1.0003291916285e-05, 3.7801620118431334e-07, 0.0008629581310054535, 0.0}, {1.0, 1.0, 1.0, 1.0, 1.0, 0.49579580674813944}}; std::size_t bytes = a.size() * sizeof(Scalar); Scalar* d_a; Scalar* d_x; Scalar* d_out; hipMalloc((void**)(&d_a), bytes); hipMalloc((void**)(&d_x), bytes); hipMalloc((void**)(&d_out), bytes); hipMemcpy(d_a, a.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(d_x, x.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_a(d_a, 6, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_x(d_x, 6, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 6, 6); gpu_out.device(gpu_device) = gpu_a.igammac(gpu_x); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 6; ++i) { for (int j = 0; j < 6; ++j) { if ((std::isnan)(igammac_s[i][j])) { VERIFY((std::isnan)(out(i, j))); } else { VERIFY_IS_APPROX(out(i, j), igammac_s[i][j]); } } } hipFree(d_a); hipFree(d_x); hipFree(d_out); } template <typename Scalar> void test_cuda_erf(const Scalar stddev) { Tensor<Scalar, 2> in(72,97); in.setRandom(); in *= in.constant(stddev); Tensor<Scalar, 2> out(72,97); out.setZero(); std::size_t bytes = in.size() * sizeof(Scalar); Scalar* d_in; Scalar* d_out; assert(hipMalloc((void**)(&d_in), bytes) == hipSuccess); assert(hipMalloc((void**)(&d_out), bytes) == hipSuccess); hipMemcpy(d_in, in.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 72, 97); gpu_out.device(gpu_device) = gpu_in.erf(); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 97; ++j) { VERIFY_IS_APPROX(out(i,j), (std::erf)(in(i,j))); } } hipFree(d_in); hipFree(d_out); } template <typename Scalar> void test_cuda_erfc(const Scalar stddev) { Tensor<Scalar, 2> in(72,97); in.setRandom(); in *= in.constant(stddev); Tensor<Scalar, 2> out(72,97); out.setZero(); std::size_t bytes = in.size() * sizeof(Scalar); Scalar* d_in; Scalar* d_out; hipMalloc((void**)(&d_in), bytes); hipMalloc((void**)(&d_out), bytes); hipMemcpy(d_in, in.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 72, 97); gpu_out.device(gpu_device) = gpu_in.erfc(); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 72; 
++i) { for (int j = 0; j < 97; ++j) { VERIFY_IS_APPROX(out(i,j), (std::erfc)(in(i,j))); } } hipFree(d_in); hipFree(d_out); } template <typename Scalar> void test_cuda_betainc() { Tensor<Scalar, 1> in_x(125); Tensor<Scalar, 1> in_a(125); Tensor<Scalar, 1> in_b(125); Tensor<Scalar, 1> out(125); Tensor<Scalar, 1> expected_out(125); out.setZero(); Scalar nan = std::numeric_limits<Scalar>::quiet_NaN(); Array<Scalar, 1, Dynamic> x(125); Array<Scalar, 1, Dynamic> a(125); Array<Scalar, 1, Dynamic> b(125); Array<Scalar, 1, Dynamic> v(125); a << 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999; b << 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 
999.999, 999.999, 999.999, 999.999, 999.999; x << -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1; v << nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.47972119876364683, 0.5, 0.5202788012363533, nan, nan, 0.9518683957740043, 0.9789663010413743, 0.9931729188073435, nan, nan, 0.999995949033062, 0.9999999999993698, 0.9999999999999999, nan, nan, 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan, nan, nan, nan, nan, nan, 0.006827081192655869, 0.0210336989586256, 0.04813160422599567, nan, nan, 0.20014344256217678, 0.5000000000000001, 0.7998565574378232, nan, nan, 0.9991401428435834, 0.999999999698403, 0.9999999999999999, nan, nan, 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan, nan, nan, nan, nan, nan, 1.0646600232370887e-25, 6.301722877826246e-13, 4.050966937974938e-06, nan, nan, 7.864342668429763e-23, 3.015969667594166e-10, 0.0008598571564165444, nan, nan, 6.031987710123844e-08, 0.5000000000000007, 0.9999999396801229, nan, nan, 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan, nan, nan, nan, nan, nan, 0.0, 7.029920380986636e-306, 2.2450728208591345e-101, nan, nan, 0.0, 9.275871147869727e-302, 1.2232913026152827e-97, nan, nan, 0.0, 3.0891393081932924e-252, 2.9303043666183996e-60, nan, nan, 2.248913486879199e-196, 0.5000000000004947, 0.9999999999999999, nan; for (int i = 0; i < 125; ++i) { in_x(i) = x(i); in_a(i) = a(i); in_b(i) = b(i); expected_out(i) = v(i); } std::size_t bytes = in_x.size() * sizeof(Scalar); Scalar* d_in_x; Scalar* d_in_a; Scalar* d_in_b; Scalar* d_out; hipMalloc((void**)(&d_in_x), bytes); hipMalloc((void**)(&d_in_a), bytes); hipMalloc((void**)(&d_in_b), bytes); hipMalloc((void**)(&d_out), bytes); hipMemcpy(d_in_x, in_x.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(d_in_a, in_a.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(d_in_b, in_b.data(), bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 125); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_a(d_in_a, 125); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_b(d_in_b, 125); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 125); gpu_out.device(gpu_device) = betainc(gpu_in_a, gpu_in_b, gpu_in_x); assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 1; i < 125; ++i) { if ((std::isnan)(expected_out(i))) { VERIFY((std::isnan)(out(i))); } else { VERIFY_IS_APPROX(out(i), expected_out(i)); } } hipFree(d_in_x); hipFree(d_in_a); hipFree(d_in_b); hipFree(d_out); } void test_cxx11_tensor_cuda() { CALL_SUBTEST_1(test_cuda_nullary()); CALL_SUBTEST_1(test_cuda_elementwise_small()); CALL_SUBTEST_1(test_cuda_elementwise()); 
CALL_SUBTEST_1(test_cuda_props()); CALL_SUBTEST_1(test_cuda_reduction()); CALL_SUBTEST_2(test_cuda_contraction<ColMajor>()); CALL_SUBTEST_2(test_cuda_contraction<RowMajor>()); CALL_SUBTEST_3(test_cuda_convolution_1d<ColMajor>()); CALL_SUBTEST_3(test_cuda_convolution_1d<RowMajor>()); CALL_SUBTEST_3(test_cuda_convolution_inner_dim_col_major_1d()); CALL_SUBTEST_3(test_cuda_convolution_inner_dim_row_major_1d()); CALL_SUBTEST_3(test_cuda_convolution_2d<ColMajor>()); CALL_SUBTEST_3(test_cuda_convolution_2d<RowMajor>()); CALL_SUBTEST_3(test_cuda_convolution_3d<ColMajor>()); CALL_SUBTEST_3(test_cuda_convolution_3d<RowMajor>()); #if __cplusplus > 199711L // std::erf, std::erfc, and so on where only added in c++11. We use them // as a golden reference to validate the results produced by Eigen. Therefore // we can only run these tests if we use a c++11 compiler. CALL_SUBTEST_4(test_cuda_lgamma<float>(1.0f)); CALL_SUBTEST_4(test_cuda_lgamma<float>(100.0f)); CALL_SUBTEST_4(test_cuda_lgamma<float>(0.01f)); CALL_SUBTEST_4(test_cuda_lgamma<float>(0.001f)); CALL_SUBTEST_4(test_cuda_lgamma<double>(1.0)); CALL_SUBTEST_4(test_cuda_lgamma<double>(100.0)); CALL_SUBTEST_4(test_cuda_lgamma<double>(0.01)); CALL_SUBTEST_4(test_cuda_lgamma<double>(0.001)); CALL_SUBTEST_4(test_cuda_erf<float>(1.0f)); CALL_SUBTEST_4(test_cuda_erf<float>(100.0f)); CALL_SUBTEST_4(test_cuda_erf<float>(0.01f)); CALL_SUBTEST_4(test_cuda_erf<float>(0.001f)); CALL_SUBTEST_4(test_cuda_erfc<float>(1.0f)); // CALL_SUBTEST(test_cuda_erfc<float>(100.0f)); CALL_SUBTEST_4(test_cuda_erfc<float>(5.0f)); // CUDA erfc lacks precision for large inputs CALL_SUBTEST_4(test_cuda_erfc<float>(0.01f)); CALL_SUBTEST_4(test_cuda_erfc<float>(0.001f)); CALL_SUBTEST_4(test_cuda_erf<double>(1.0)); CALL_SUBTEST_4(test_cuda_erf<double>(100.0)); CALL_SUBTEST_4(test_cuda_erf<double>(0.01)); CALL_SUBTEST_4(test_cuda_erf<double>(0.001)); CALL_SUBTEST_4(test_cuda_erfc<double>(1.0)); // CALL_SUBTEST(test_cuda_erfc<double>(100.0)); CALL_SUBTEST_4(test_cuda_erfc<double>(5.0)); // CUDA erfc lacks precision for large inputs CALL_SUBTEST_4(test_cuda_erfc<double>(0.01)); CALL_SUBTEST_4(test_cuda_erfc<double>(0.001)); CALL_SUBTEST_5(test_cuda_digamma<float>()); CALL_SUBTEST_5(test_cuda_digamma<double>()); CALL_SUBTEST_5(test_cuda_polygamma<float>()); CALL_SUBTEST_5(test_cuda_polygamma<double>()); CALL_SUBTEST_5(test_cuda_zeta<float>()); CALL_SUBTEST_5(test_cuda_zeta<double>()); CALL_SUBTEST_5(test_cuda_igamma<float>()); CALL_SUBTEST_5(test_cuda_igammac<float>()); CALL_SUBTEST_5(test_cuda_igamma<double>()); CALL_SUBTEST_5(test_cuda_igammac<double>()); CALL_SUBTEST_6(test_cuda_betainc<float>()); CALL_SUBTEST_6(test_cuda_betainc<double>()); #endif }
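// --- Distilled template (added for illustration; not one of the subtests above) ---
// Every test in this file repeats the same pattern: hipMalloc device buffers, hipMemcpy the
// host tensors in, wrap the raw pointers in Eigen::TensorMap, evaluate a tensor expression
// through .device(gpu_device), hipMemcpyAsync the result back on the device's stream,
// hipStreamSynchronize, verify on the host, and hipFree. The sketch below strips that down
// to one elementwise expression (a tensor multiplied by itself) so the boilerplate is visible
// on its own; it reuses only APIs that already appear above and uses the HIP spellings to
// match this hipified file.
void test_cuda_elementwise_sketch() {
  Eigen::Tensor<float, 1> in(64);
  Eigen::Tensor<float, 1> out(64);
  in.setRandom();
  std::size_t bytes = in.size() * sizeof(float);

  float* d_in;
  float* d_out;
  hipMalloc((void**)(&d_in), bytes);
  hipMalloc((void**)(&d_out), bytes);
  hipMemcpy(d_in, in.data(), bytes, hipMemcpyHostToDevice);

  Eigen::CudaStreamDevice stream;
  Eigen::GpuDevice gpu_device(&stream);
  Eigen::TensorMap<Eigen::Tensor<float, 1> > gpu_in(d_in, 64);
  Eigen::TensorMap<Eigen::Tensor<float, 1> > gpu_out(d_out, 64);

  gpu_out.device(gpu_device) = gpu_in * gpu_in;   // expression is evaluated on the GPU

  assert(hipMemcpyAsync(out.data(), d_out, bytes, hipMemcpyDeviceToHost,
                        gpu_device.stream()) == hipSuccess);
  assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);

  for (int i = 0; i < 64; ++i) {
    VERIFY_IS_APPROX(out(i), in(i) * in(i));      // host-side reference check
  }
  hipFree(d_in);
  hipFree(d_out);
}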
8ed1f395646928e0de86cd2dc7e039216e24ec5d.cu
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_NO_COMPLEX #define EIGEN_TEST_FUNC cxx11_tensor_cuda #define EIGEN_USE_GPU #include "main.h" #include <unsupported/Eigen/CXX11/Tensor> using Eigen::Tensor; void test_cuda_nullary() { Tensor<float, 1, 0, int> in1(2); Tensor<float, 1, 0, int> in2(2); in1.setRandom(); in2.setRandom(); std::size_t tensor_bytes = in1.size() * sizeof(float); float* d_in1; float* d_in2; cudaMalloc((void**)(&d_in1), tensor_bytes); cudaMalloc((void**)(&d_in2), tensor_bytes); cudaMemcpy(d_in1, in1.data(), tensor_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in2, in2.data(), tensor_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_in1( d_in1, 2); Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_in2( d_in2, 2); gpu_in1.device(gpu_device) = gpu_in1.constant(3.14f); gpu_in2.device(gpu_device) = gpu_in2.random(); Tensor<float, 1, 0, int> new1(2); Tensor<float, 1, 0, int> new2(2); assert(cudaMemcpyAsync(new1.data(), d_in1, tensor_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaMemcpyAsync(new2.data(), d_in2, tensor_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 2; ++i) { VERIFY_IS_APPROX(new1(i), 3.14f); VERIFY_IS_NOT_EQUAL(new2(i), in2(i)); } cudaFree(d_in1); cudaFree(d_in2); } void test_cuda_elementwise_small() { Tensor<float, 1> in1(Eigen::array<Eigen::DenseIndex, 1>(2)); Tensor<float, 1> in2(Eigen::array<Eigen::DenseIndex, 1>(2)); Tensor<float, 1> out(Eigen::array<Eigen::DenseIndex, 1>(2)); in1.setRandom(); in2.setRandom(); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t in2_bytes = in2.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_in1; float* d_in2; float* d_out; cudaMalloc((void**)(&d_in1), in1_bytes); cudaMalloc((void**)(&d_in2), in2_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in2, in2.data(), in2_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in1( d_in1, Eigen::array<Eigen::DenseIndex, 1>(2)); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in2( d_in2, Eigen::array<Eigen::DenseIndex, 1>(2)); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_out( d_out, Eigen::array<Eigen::DenseIndex, 1>(2)); gpu_out.device(gpu_device) = gpu_in1 + gpu_in2; assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 2; ++i) { VERIFY_IS_APPROX( out(Eigen::array<Eigen::DenseIndex, 1>(i)), in1(Eigen::array<Eigen::DenseIndex, 1>(i)) + in2(Eigen::array<Eigen::DenseIndex, 1>(i))); } cudaFree(d_in1); cudaFree(d_in2); cudaFree(d_out); } void test_cuda_elementwise() { Tensor<float, 3> in1(Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Tensor<float, 3> 
in2(Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Tensor<float, 3> in3(Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Tensor<float, 3> out(Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); in1.setRandom(); in2.setRandom(); in3.setRandom(); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t in2_bytes = in2.size() * sizeof(float); std::size_t in3_bytes = in3.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_in1; float* d_in2; float* d_in3; float* d_out; cudaMalloc((void**)(&d_in1), in1_bytes); cudaMalloc((void**)(&d_in2), in2_bytes); cudaMalloc((void**)(&d_in3), in3_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in2, in2.data(), in2_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in3, in3.data(), in3_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in1(d_in1, Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in2(d_in2, Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_in3(d_in3, Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); Eigen::TensorMap<Eigen::Tensor<float, 3> > gpu_out(d_out, Eigen::array<Eigen::DenseIndex, 3>(72,53,97)); gpu_out.device(gpu_device) = gpu_in1 + gpu_in2 * gpu_in3; assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 53; ++j) { for (int k = 0; k < 97; ++k) { VERIFY_IS_APPROX(out(Eigen::array<Eigen::DenseIndex, 3>(i,j,k)), in1(Eigen::array<Eigen::DenseIndex, 3>(i,j,k)) + in2(Eigen::array<Eigen::DenseIndex, 3>(i,j,k)) * in3(Eigen::array<Eigen::DenseIndex, 3>(i,j,k))); } } } cudaFree(d_in1); cudaFree(d_in2); cudaFree(d_in3); cudaFree(d_out); } void test_cuda_props() { Tensor<float, 1> in1(200); Tensor<bool, 1> out(200); in1.setRandom(); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(bool); float* d_in1; bool* d_out; cudaMalloc((void**)(&d_in1), in1_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 1>, Eigen::Aligned> gpu_in1( d_in1, 200); Eigen::TensorMap<Eigen::Tensor<bool, 1>, Eigen::Aligned> gpu_out( d_out, 200); gpu_out.device(gpu_device) = (gpu_in1.isnan)(); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 200; ++i) { VERIFY_IS_EQUAL(out(i), (std::isnan)(in1(i))); } cudaFree(d_in1); cudaFree(d_out); } void test_cuda_reduction() { Tensor<float, 4> in1(72,53,97,113); Tensor<float, 2> out(72,97); in1.setRandom(); std::size_t in1_bytes = in1.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_in1; float* d_out; cudaMalloc((void**)(&d_in1), in1_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_in1, in1.data(), in1_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4> > gpu_in1(d_in1, 72,53,97,113); Eigen::TensorMap<Eigen::Tensor<float, 2> > gpu_out(d_out, 72,97); array<Eigen::DenseIndex, 2> 
reduction_axis; reduction_axis[0] = 1; reduction_axis[1] = 3; gpu_out.device(gpu_device) = gpu_in1.maximum(reduction_axis); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 97; ++j) { float expected = 0; for (int k = 0; k < 53; ++k) { for (int l = 0; l < 113; ++l) { expected = std::max<float>(expected, in1(i, k, j, l)); } } VERIFY_IS_APPROX(out(i,j), expected); } } cudaFree(d_in1); cudaFree(d_out); } template<int DataLayout> void test_cuda_contraction() { // with these dimensions, the output has 300 * 140 elements, which is // more than 30 * 1024, which is the number of threads in blocks on // a 15 SM GK110 GPU Tensor<float, 4, DataLayout> t_left(6, 50, 3, 31); Tensor<float, 5, DataLayout> t_right(Eigen::array<Eigen::DenseIndex, 5>(3, 31, 7, 20, 1)); Tensor<float, 5, DataLayout> t_result(Eigen::array<Eigen::DenseIndex, 5>(6, 50, 7, 20, 1)); t_left.setRandom(); t_right.setRandom(); std::size_t t_left_bytes = t_left.size() * sizeof(float); std::size_t t_right_bytes = t_right.size() * sizeof(float); std::size_t t_result_bytes = t_result.size() * sizeof(float); float* d_t_left; float* d_t_right; float* d_t_result; cudaMalloc((void**)(&d_t_left), t_left_bytes); cudaMalloc((void**)(&d_t_right), t_right_bytes); cudaMalloc((void**)(&d_t_result), t_result_bytes); cudaMemcpy(d_t_left, t_left.data(), t_left_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_t_right, t_right.data(), t_right_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_t_left(d_t_left, 6, 50, 3, 31); Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_t_right(d_t_right, 3, 31, 7, 20, 1); Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_t_result(d_t_result, 6, 50, 7, 20, 1); typedef Eigen::Map<Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> > MapXf; MapXf m_left(t_left.data(), 300, 93); MapXf m_right(t_right.data(), 93, 140); Eigen::Matrix<float, Dynamic, Dynamic, DataLayout> m_result(300, 140); typedef Tensor<float, 1>::DimensionPair DimPair; Eigen::array<DimPair, 2> dims; dims[0] = DimPair(2, 0); dims[1] = DimPair(3, 1); m_result = m_left * m_right; gpu_t_result.device(gpu_device) = gpu_t_left.contract(gpu_t_right, dims); cudaMemcpy(t_result.data(), d_t_result, t_result_bytes, cudaMemcpyDeviceToHost); for (DenseIndex i = 0; i < t_result.size(); i++) { if (fabs(t_result.data()[i] - m_result.data()[i]) >= 1e-4f) { std::cout << "mismatch detected at index " << i << ": " << t_result.data()[i] << " vs " << m_result.data()[i] << std::endl; assert(false); } } cudaFree(d_t_left); cudaFree(d_t_right); cudaFree(d_t_result); } template<int DataLayout> void test_cuda_convolution_1d() { Tensor<float, 4, DataLayout> input(74,37,11,137); Tensor<float, 1, DataLayout> kernel(4); Tensor<float, 4, DataLayout> out(74,34,11,137); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; cudaMalloc((void**)(&d_input), input_bytes); cudaMalloc((void**)(&d_kernel), kernel_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice); 
cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_input(d_input, 74,37,11,137); Eigen::TensorMap<Eigen::Tensor<float, 1, DataLayout> > gpu_kernel(d_kernel, 4); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_out(d_out, 74,34,11,137); Eigen::array<Eigen::DenseIndex, 1> dims(1); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 74; ++i) { for (int j = 0; j < 34; ++j) { for (int k = 0; k < 11; ++k) { for (int l = 0; l < 137; ++l) { const float result = out(i,j,k,l); const float expected = input(i,j+0,k,l) * kernel(0) + input(i,j+1,k,l) * kernel(1) + input(i,j+2,k,l) * kernel(2) + input(i,j+3,k,l) * kernel(3); VERIFY_IS_APPROX(result, expected); } } } } cudaFree(d_input); cudaFree(d_kernel); cudaFree(d_out); } void test_cuda_convolution_inner_dim_col_major_1d() { Tensor<float, 4, ColMajor> input(74,9,11,7); Tensor<float, 1, ColMajor> kernel(4); Tensor<float, 4, ColMajor> out(71,9,11,7); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; cudaMalloc((void**)(&d_input), input_bytes); cudaMalloc((void**)(&d_kernel), kernel_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, ColMajor> > gpu_input(d_input,74,9,11,7); Eigen::TensorMap<Eigen::Tensor<float, 1, ColMajor> > gpu_kernel(d_kernel,4); Eigen::TensorMap<Eigen::Tensor<float, 4, ColMajor> > gpu_out(d_out,71,9,11,7); Eigen::array<Eigen::DenseIndex, 1> dims(0); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 71; ++i) { for (int j = 0; j < 9; ++j) { for (int k = 0; k < 11; ++k) { for (int l = 0; l < 7; ++l) { const float result = out(i,j,k,l); const float expected = input(i+0,j,k,l) * kernel(0) + input(i+1,j,k,l) * kernel(1) + input(i+2,j,k,l) * kernel(2) + input(i+3,j,k,l) * kernel(3); VERIFY_IS_APPROX(result, expected); } } } } cudaFree(d_input); cudaFree(d_kernel); cudaFree(d_out); } void test_cuda_convolution_inner_dim_row_major_1d() { Tensor<float, 4, RowMajor> input(7,9,11,74); Tensor<float, 1, RowMajor> kernel(4); Tensor<float, 4, RowMajor> out(7,9,11,71); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; cudaMalloc((void**)(&d_input), input_bytes); cudaMalloc((void**)(&d_kernel), kernel_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_input, input.data(), input_bytes, 
cudaMemcpyHostToDevice); cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, RowMajor> > gpu_input(d_input, 7,9,11,74); Eigen::TensorMap<Eigen::Tensor<float, 1, RowMajor> > gpu_kernel(d_kernel, 4); Eigen::TensorMap<Eigen::Tensor<float, 4, RowMajor> > gpu_out(d_out, 7,9,11,71); Eigen::array<Eigen::DenseIndex, 1> dims(3); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 7; ++i) { for (int j = 0; j < 9; ++j) { for (int k = 0; k < 11; ++k) { for (int l = 0; l < 71; ++l) { const float result = out(i,j,k,l); const float expected = input(i,j,k,l+0) * kernel(0) + input(i,j,k,l+1) * kernel(1) + input(i,j,k,l+2) * kernel(2) + input(i,j,k,l+3) * kernel(3); VERIFY_IS_APPROX(result, expected); } } } } cudaFree(d_input); cudaFree(d_kernel); cudaFree(d_out); } template<int DataLayout> void test_cuda_convolution_2d() { Tensor<float, 4, DataLayout> input(74,37,11,137); Tensor<float, 2, DataLayout> kernel(3,4); Tensor<float, 4, DataLayout> out(74,35,8,137); input = input.constant(10.0f) + input.random(); kernel = kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; cudaMalloc((void**)(&d_input), input_bytes); cudaMalloc((void**)(&d_kernel), kernel_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_input(d_input,74,37,11,137); Eigen::TensorMap<Eigen::Tensor<float, 2, DataLayout> > gpu_kernel(d_kernel,3,4); Eigen::TensorMap<Eigen::Tensor<float, 4, DataLayout> > gpu_out(d_out,74,35,8,137); Eigen::array<Eigen::DenseIndex, 2> dims(1,2); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 74; ++i) { for (int j = 0; j < 35; ++j) { for (int k = 0; k < 8; ++k) { for (int l = 0; l < 137; ++l) { const float result = out(i,j,k,l); const float expected = input(i,j+0,k+0,l) * kernel(0,0) + input(i,j+1,k+0,l) * kernel(1,0) + input(i,j+2,k+0,l) * kernel(2,0) + input(i,j+0,k+1,l) * kernel(0,1) + input(i,j+1,k+1,l) * kernel(1,1) + input(i,j+2,k+1,l) * kernel(2,1) + input(i,j+0,k+2,l) * kernel(0,2) + input(i,j+1,k+2,l) * kernel(1,2) + input(i,j+2,k+2,l) * kernel(2,2) + input(i,j+0,k+3,l) * kernel(0,3) + input(i,j+1,k+3,l) * kernel(1,3) + input(i,j+2,k+3,l) * kernel(2,3); VERIFY_IS_APPROX(result, expected); } } } } cudaFree(d_input); cudaFree(d_kernel); cudaFree(d_out); } template<int DataLayout> void test_cuda_convolution_3d() { Tensor<float, 5, DataLayout> input(Eigen::array<Eigen::DenseIndex, 5>(74,37,11,137,17)); Tensor<float, 3, DataLayout> kernel(3,4,2); Tensor<float, 5, DataLayout> out(Eigen::array<Eigen::DenseIndex, 5>(74,35,8,136,17)); input = input.constant(10.0f) + input.random(); kernel = 
kernel.constant(7.0f) + kernel.random(); std::size_t input_bytes = input.size() * sizeof(float); std::size_t kernel_bytes = kernel.size() * sizeof(float); std::size_t out_bytes = out.size() * sizeof(float); float* d_input; float* d_kernel; float* d_out; cudaMalloc((void**)(&d_input), input_bytes); cudaMalloc((void**)(&d_kernel), kernel_bytes); cudaMalloc((void**)(&d_out), out_bytes); cudaMemcpy(d_input, input.data(), input_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_kernel, kernel.data(), kernel_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_input(d_input,74,37,11,137,17); Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> > gpu_kernel(d_kernel,3,4,2); Eigen::TensorMap<Eigen::Tensor<float, 5, DataLayout> > gpu_out(d_out,74,35,8,136,17); Eigen::array<Eigen::DenseIndex, 3> dims(1,2,3); gpu_out.device(gpu_device) = gpu_input.convolve(gpu_kernel, dims); assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 74; ++i) { for (int j = 0; j < 35; ++j) { for (int k = 0; k < 8; ++k) { for (int l = 0; l < 136; ++l) { for (int m = 0; m < 17; ++m) { const float result = out(i,j,k,l,m); const float expected = input(i,j+0,k+0,l+0,m) * kernel(0,0,0) + input(i,j+1,k+0,l+0,m) * kernel(1,0,0) + input(i,j+2,k+0,l+0,m) * kernel(2,0,0) + input(i,j+0,k+1,l+0,m) * kernel(0,1,0) + input(i,j+1,k+1,l+0,m) * kernel(1,1,0) + input(i,j+2,k+1,l+0,m) * kernel(2,1,0) + input(i,j+0,k+2,l+0,m) * kernel(0,2,0) + input(i,j+1,k+2,l+0,m) * kernel(1,2,0) + input(i,j+2,k+2,l+0,m) * kernel(2,2,0) + input(i,j+0,k+3,l+0,m) * kernel(0,3,0) + input(i,j+1,k+3,l+0,m) * kernel(1,3,0) + input(i,j+2,k+3,l+0,m) * kernel(2,3,0) + input(i,j+0,k+0,l+1,m) * kernel(0,0,1) + input(i,j+1,k+0,l+1,m) * kernel(1,0,1) + input(i,j+2,k+0,l+1,m) * kernel(2,0,1) + input(i,j+0,k+1,l+1,m) * kernel(0,1,1) + input(i,j+1,k+1,l+1,m) * kernel(1,1,1) + input(i,j+2,k+1,l+1,m) * kernel(2,1,1) + input(i,j+0,k+2,l+1,m) * kernel(0,2,1) + input(i,j+1,k+2,l+1,m) * kernel(1,2,1) + input(i,j+2,k+2,l+1,m) * kernel(2,2,1) + input(i,j+0,k+3,l+1,m) * kernel(0,3,1) + input(i,j+1,k+3,l+1,m) * kernel(1,3,1) + input(i,j+2,k+3,l+1,m) * kernel(2,3,1); VERIFY_IS_APPROX(result, expected); } } } } } cudaFree(d_input); cudaFree(d_kernel); cudaFree(d_out); } template <typename Scalar> void test_cuda_lgamma(const Scalar stddev) { Tensor<Scalar, 2> in(72,97); in.setRandom(); in *= in.constant(stddev); Tensor<Scalar, 2> out(72,97); out.setZero(); std::size_t bytes = in.size() * sizeof(Scalar); Scalar* d_in; Scalar* d_out; cudaMalloc((void**)(&d_in), bytes); cudaMalloc((void**)(&d_out), bytes); cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 72, 97); gpu_out.device(gpu_device) = gpu_in.lgamma(); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 97; ++j) { VERIFY_IS_APPROX(out(i,j), (std::lgamma)(in(i,j))); } } cudaFree(d_in); cudaFree(d_out); } template <typename Scalar> void test_cuda_digamma() { Tensor<Scalar, 1> in(7); Tensor<Scalar, 1> out(7); 
Tensor<Scalar, 1> expected_out(7); out.setZero(); in(0) = Scalar(1); in(1) = Scalar(1.5); in(2) = Scalar(4); in(3) = Scalar(-10.5); in(4) = Scalar(10000.5); in(5) = Scalar(0); in(6) = Scalar(-1); expected_out(0) = Scalar(-0.5772156649015329); expected_out(1) = Scalar(0.03648997397857645); expected_out(2) = Scalar(1.2561176684318); expected_out(3) = Scalar(2.398239129535781); expected_out(4) = Scalar(9.210340372392849); expected_out(5) = std::numeric_limits<Scalar>::infinity(); expected_out(6) = std::numeric_limits<Scalar>::infinity(); std::size_t bytes = in.size() * sizeof(Scalar); Scalar* d_in; Scalar* d_out; cudaMalloc((void**)(&d_in), bytes); cudaMalloc((void**)(&d_out), bytes); cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in(d_in, 7); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 7); gpu_out.device(gpu_device) = gpu_in.digamma(); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 5; ++i) { VERIFY_IS_APPROX(out(i), expected_out(i)); } for (int i = 5; i < 7; ++i) { VERIFY_IS_EQUAL(out(i), expected_out(i)); } cudaFree(d_in); cudaFree(d_out); } template <typename Scalar> void test_cuda_zeta() { Tensor<Scalar, 1> in_x(6); Tensor<Scalar, 1> in_q(6); Tensor<Scalar, 1> out(6); Tensor<Scalar, 1> expected_out(6); out.setZero(); in_x(0) = Scalar(1); in_x(1) = Scalar(1.5); in_x(2) = Scalar(4); in_x(3) = Scalar(-10.5); in_x(4) = Scalar(10000.5); in_x(5) = Scalar(3); in_q(0) = Scalar(1.2345); in_q(1) = Scalar(2); in_q(2) = Scalar(1.5); in_q(3) = Scalar(3); in_q(4) = Scalar(1.0001); in_q(5) = Scalar(-2.5); expected_out(0) = std::numeric_limits<Scalar>::infinity(); expected_out(1) = Scalar(1.61237534869); expected_out(2) = Scalar(0.234848505667); expected_out(3) = Scalar(1.03086757337e-5); expected_out(4) = Scalar(0.367879440865); expected_out(5) = Scalar(0.054102025820864097); std::size_t bytes = in_x.size() * sizeof(Scalar); Scalar* d_in_x; Scalar* d_in_q; Scalar* d_out; cudaMalloc((void**)(&d_in_x), bytes); cudaMalloc((void**)(&d_in_q), bytes); cudaMalloc((void**)(&d_out), bytes); cudaMemcpy(d_in_x, in_x.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in_q, in_q.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_q(d_in_q, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 6); gpu_out.device(gpu_device) = gpu_in_x.zeta(gpu_in_q); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); VERIFY_IS_EQUAL(out(0), expected_out(0)); VERIFY((std::isnan)(out(3))); for (int i = 1; i < 6; ++i) { if (i != 3) { VERIFY_IS_APPROX(out(i), expected_out(i)); } } cudaFree(d_in_x); cudaFree(d_in_q); cudaFree(d_out); } template <typename Scalar> void test_cuda_polygamma() { Tensor<Scalar, 1> in_x(7); Tensor<Scalar, 1> in_n(7); Tensor<Scalar, 1> out(7); Tensor<Scalar, 1> expected_out(7); out.setZero(); in_n(0) = Scalar(1); in_n(1) = Scalar(1); in_n(2) = Scalar(1); in_n(3) = Scalar(17); in_n(4) = Scalar(31); in_n(5) = Scalar(28); in_n(6) = Scalar(8); in_x(0) = Scalar(2); in_x(1) = Scalar(3); in_x(2) = Scalar(25.5); in_x(3) = 
Scalar(4.7); in_x(4) = Scalar(11.8); in_x(5) = Scalar(17.7); in_x(6) = Scalar(30.2); expected_out(0) = Scalar(0.644934066848); expected_out(1) = Scalar(0.394934066848); expected_out(2) = Scalar(0.0399946696496); expected_out(3) = Scalar(293.334565435); expected_out(4) = Scalar(0.445487887616); expected_out(5) = Scalar(-2.47810300902e-07); expected_out(6) = Scalar(-8.29668781082e-09); std::size_t bytes = in_x.size() * sizeof(Scalar); Scalar* d_in_x; Scalar* d_in_n; Scalar* d_out; cudaMalloc((void**)(&d_in_x), bytes); cudaMalloc((void**)(&d_in_n), bytes); cudaMalloc((void**)(&d_out), bytes); cudaMemcpy(d_in_x, in_x.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in_n, in_n.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 7); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_n(d_in_n, 7); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 7); gpu_out.device(gpu_device) = gpu_in_n.polygamma(gpu_in_x); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 7; ++i) { VERIFY_IS_APPROX(out(i), expected_out(i)); } cudaFree(d_in_x); cudaFree(d_in_n); cudaFree(d_out); } template <typename Scalar> void test_cuda_igamma() { Tensor<Scalar, 2> a(6, 6); Tensor<Scalar, 2> x(6, 6); Tensor<Scalar, 2> out(6, 6); out.setZero(); Scalar a_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; Scalar x_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; for (int i = 0; i < 6; ++i) { for (int j = 0; j < 6; ++j) { a(i, j) = a_s[i]; x(i, j) = x_s[j]; } } Scalar nan = std::numeric_limits<Scalar>::quiet_NaN(); Scalar igamma_s[][6] = {{0.0, nan, nan, nan, nan, nan}, {0.0, 0.6321205588285578, 0.7768698398515702, 0.9816843611112658, 9.999500016666262e-05, 1.0}, {0.0, 0.4275932955291202, 0.608374823728911, 0.9539882943107686, 7.522076445089201e-07, 1.0}, {0.0, 0.01898815687615381, 0.06564245437845008, 0.5665298796332909, 4.166333347221828e-18, 1.0}, {0.0, 0.9999780593618628, 0.9999899967080838, 0.9999996219837988, 0.9991370418689945, 1.0}, {0.0, 0.0, 0.0, 0.0, 0.0, 0.5042041932513908}}; std::size_t bytes = a.size() * sizeof(Scalar); Scalar* d_a; Scalar* d_x; Scalar* d_out; assert(cudaMalloc((void**)(&d_a), bytes) == cudaSuccess); assert(cudaMalloc((void**)(&d_x), bytes) == cudaSuccess); assert(cudaMalloc((void**)(&d_out), bytes) == cudaSuccess); cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_x, x.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_a(d_a, 6, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_x(d_x, 6, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 6, 6); gpu_out.device(gpu_device) = gpu_a.igamma(gpu_x); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 6; ++i) { for (int j = 0; j < 6; ++j) { if ((std::isnan)(igamma_s[i][j])) { VERIFY((std::isnan)(out(i, j))); } else { VERIFY_IS_APPROX(out(i, j), igamma_s[i][j]); } } } cudaFree(d_a); cudaFree(d_x); cudaFree(d_out); } template <typename Scalar> void test_cuda_igammac() { Tensor<Scalar, 2> a(6, 6); Tensor<Scalar, 2> x(6, 6); 
Tensor<Scalar, 2> out(6, 6); out.setZero(); Scalar a_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; Scalar x_s[] = {Scalar(0), Scalar(1), Scalar(1.5), Scalar(4), Scalar(0.0001), Scalar(1000.5)}; for (int i = 0; i < 6; ++i) { for (int j = 0; j < 6; ++j) { a(i, j) = a_s[i]; x(i, j) = x_s[j]; } } Scalar nan = std::numeric_limits<Scalar>::quiet_NaN(); Scalar igammac_s[][6] = {{nan, nan, nan, nan, nan, nan}, {1.0, 0.36787944117144233, 0.22313016014842982, 0.018315638888734182, 0.9999000049998333, 0.0}, {1.0, 0.5724067044708798, 0.3916251762710878, 0.04601170568923136, 0.9999992477923555, 0.0}, {1.0, 0.9810118431238462, 0.9343575456215499, 0.4334701203667089, 1.0, 0.0}, {1.0, 2.1940638138146658e-05, 1.0003291916285e-05, 3.7801620118431334e-07, 0.0008629581310054535, 0.0}, {1.0, 1.0, 1.0, 1.0, 1.0, 0.49579580674813944}}; std::size_t bytes = a.size() * sizeof(Scalar); Scalar* d_a; Scalar* d_x; Scalar* d_out; cudaMalloc((void**)(&d_a), bytes); cudaMalloc((void**)(&d_x), bytes); cudaMalloc((void**)(&d_out), bytes); cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_x, x.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_a(d_a, 6, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_x(d_x, 6, 6); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 6, 6); gpu_out.device(gpu_device) = gpu_a.igammac(gpu_x); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 6; ++i) { for (int j = 0; j < 6; ++j) { if ((std::isnan)(igammac_s[i][j])) { VERIFY((std::isnan)(out(i, j))); } else { VERIFY_IS_APPROX(out(i, j), igammac_s[i][j]); } } } cudaFree(d_a); cudaFree(d_x); cudaFree(d_out); } template <typename Scalar> void test_cuda_erf(const Scalar stddev) { Tensor<Scalar, 2> in(72,97); in.setRandom(); in *= in.constant(stddev); Tensor<Scalar, 2> out(72,97); out.setZero(); std::size_t bytes = in.size() * sizeof(Scalar); Scalar* d_in; Scalar* d_out; assert(cudaMalloc((void**)(&d_in), bytes) == cudaSuccess); assert(cudaMalloc((void**)(&d_out), bytes) == cudaSuccess); cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 72, 97); gpu_out.device(gpu_device) = gpu_in.erf(); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 97; ++j) { VERIFY_IS_APPROX(out(i,j), (std::erf)(in(i,j))); } } cudaFree(d_in); cudaFree(d_out); } template <typename Scalar> void test_cuda_erfc(const Scalar stddev) { Tensor<Scalar, 2> in(72,97); in.setRandom(); in *= in.constant(stddev); Tensor<Scalar, 2> out(72,97); out.setZero(); std::size_t bytes = in.size() * sizeof(Scalar); Scalar* d_in; Scalar* d_out; cudaMalloc((void**)(&d_in), bytes); cudaMalloc((void**)(&d_out), bytes); cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_in(d_in, 72, 97); Eigen::TensorMap<Eigen::Tensor<Scalar, 2> > gpu_out(d_out, 72, 97); gpu_out.device(gpu_device) = 
gpu_in.erfc(); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 72; ++i) { for (int j = 0; j < 97; ++j) { VERIFY_IS_APPROX(out(i,j), (std::erfc)(in(i,j))); } } cudaFree(d_in); cudaFree(d_out); } template <typename Scalar> void test_cuda_betainc() { Tensor<Scalar, 1> in_x(125); Tensor<Scalar, 1> in_a(125); Tensor<Scalar, 1> in_b(125); Tensor<Scalar, 1> out(125); Tensor<Scalar, 1> expected_out(125); out.setZero(); Scalar nan = std::numeric_limits<Scalar>::quiet_NaN(); Array<Scalar, 1, Dynamic> x(125); Array<Scalar, 1, Dynamic> a(125); Array<Scalar, 1, Dynamic> b(125); Array<Scalar, 1, Dynamic> v(125); a << 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999, 999.999; b << 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.03062277660168379, 0.999, 0.999, 0.999, 0.999, 0.999, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 31.62177660168379, 999.999, 999.999, 999.999, 999.999, 999.999; x << -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1, -0.1, 0.2, 0.5, 0.8, 1.1; v << nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.47972119876364683, 0.5, 0.5202788012363533, nan, nan, 0.9518683957740043, 0.9789663010413743, 0.9931729188073435, nan, nan, 0.999995949033062, 0.9999999999993698, 0.9999999999999999, nan, nan, 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan, nan, nan, nan, nan, nan, 0.006827081192655869, 0.0210336989586256, 0.04813160422599567, nan, nan, 0.20014344256217678, 0.5000000000000001, 0.7998565574378232, nan, nan, 0.9991401428435834, 0.999999999698403, 0.9999999999999999, nan, nan, 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan, nan, nan, nan, nan, nan, 1.0646600232370887e-25, 6.301722877826246e-13, 4.050966937974938e-06, nan, nan, 7.864342668429763e-23, 3.015969667594166e-10, 0.0008598571564165444, nan, nan, 6.031987710123844e-08, 0.5000000000000007, 0.9999999396801229, nan, nan, 0.9999999999999999, 0.9999999999999999, 0.9999999999999999, nan, nan, nan, nan, nan, nan, nan, 0.0, 7.029920380986636e-306, 2.2450728208591345e-101, nan, nan, 0.0, 9.275871147869727e-302, 1.2232913026152827e-97, nan, nan, 0.0, 3.0891393081932924e-252, 2.9303043666183996e-60, nan, nan, 2.248913486879199e-196, 0.5000000000004947, 0.9999999999999999, nan; for (int i = 0; i < 125; ++i) { in_x(i) = x(i); in_a(i) = a(i); in_b(i) = b(i); expected_out(i) = v(i); } std::size_t bytes = in_x.size() * sizeof(Scalar); Scalar* d_in_x; Scalar* d_in_a; Scalar* d_in_b; Scalar* d_out; cudaMalloc((void**)(&d_in_x), bytes); cudaMalloc((void**)(&d_in_a), bytes); cudaMalloc((void**)(&d_in_b), bytes); cudaMalloc((void**)(&d_out), bytes); cudaMemcpy(d_in_x, in_x.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in_a, in_a.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in_b, in_b.data(), bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_x(d_in_x, 125); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_a(d_in_a, 125); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_in_b(d_in_b, 125); Eigen::TensorMap<Eigen::Tensor<Scalar, 1> > gpu_out(d_out, 125); gpu_out.device(gpu_device) = betainc(gpu_in_a, gpu_in_b, gpu_in_x); assert(cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 1; i < 125; ++i) { if ((std::isnan)(expected_out(i))) { VERIFY((std::isnan)(out(i))); } else { 
VERIFY_IS_APPROX(out(i), expected_out(i)); } } cudaFree(d_in_x); cudaFree(d_in_a); cudaFree(d_in_b); cudaFree(d_out); } void test_cxx11_tensor_cuda() { CALL_SUBTEST_1(test_cuda_nullary()); CALL_SUBTEST_1(test_cuda_elementwise_small()); CALL_SUBTEST_1(test_cuda_elementwise()); CALL_SUBTEST_1(test_cuda_props()); CALL_SUBTEST_1(test_cuda_reduction()); CALL_SUBTEST_2(test_cuda_contraction<ColMajor>()); CALL_SUBTEST_2(test_cuda_contraction<RowMajor>()); CALL_SUBTEST_3(test_cuda_convolution_1d<ColMajor>()); CALL_SUBTEST_3(test_cuda_convolution_1d<RowMajor>()); CALL_SUBTEST_3(test_cuda_convolution_inner_dim_col_major_1d()); CALL_SUBTEST_3(test_cuda_convolution_inner_dim_row_major_1d()); CALL_SUBTEST_3(test_cuda_convolution_2d<ColMajor>()); CALL_SUBTEST_3(test_cuda_convolution_2d<RowMajor>()); CALL_SUBTEST_3(test_cuda_convolution_3d<ColMajor>()); CALL_SUBTEST_3(test_cuda_convolution_3d<RowMajor>()); #if __cplusplus > 199711L // std::erf, std::erfc, and so on where only added in c++11. We use them // as a golden reference to validate the results produced by Eigen. Therefore // we can only run these tests if we use a c++11 compiler. CALL_SUBTEST_4(test_cuda_lgamma<float>(1.0f)); CALL_SUBTEST_4(test_cuda_lgamma<float>(100.0f)); CALL_SUBTEST_4(test_cuda_lgamma<float>(0.01f)); CALL_SUBTEST_4(test_cuda_lgamma<float>(0.001f)); CALL_SUBTEST_4(test_cuda_lgamma<double>(1.0)); CALL_SUBTEST_4(test_cuda_lgamma<double>(100.0)); CALL_SUBTEST_4(test_cuda_lgamma<double>(0.01)); CALL_SUBTEST_4(test_cuda_lgamma<double>(0.001)); CALL_SUBTEST_4(test_cuda_erf<float>(1.0f)); CALL_SUBTEST_4(test_cuda_erf<float>(100.0f)); CALL_SUBTEST_4(test_cuda_erf<float>(0.01f)); CALL_SUBTEST_4(test_cuda_erf<float>(0.001f)); CALL_SUBTEST_4(test_cuda_erfc<float>(1.0f)); // CALL_SUBTEST(test_cuda_erfc<float>(100.0f)); CALL_SUBTEST_4(test_cuda_erfc<float>(5.0f)); // CUDA erfc lacks precision for large inputs CALL_SUBTEST_4(test_cuda_erfc<float>(0.01f)); CALL_SUBTEST_4(test_cuda_erfc<float>(0.001f)); CALL_SUBTEST_4(test_cuda_erf<double>(1.0)); CALL_SUBTEST_4(test_cuda_erf<double>(100.0)); CALL_SUBTEST_4(test_cuda_erf<double>(0.01)); CALL_SUBTEST_4(test_cuda_erf<double>(0.001)); CALL_SUBTEST_4(test_cuda_erfc<double>(1.0)); // CALL_SUBTEST(test_cuda_erfc<double>(100.0)); CALL_SUBTEST_4(test_cuda_erfc<double>(5.0)); // CUDA erfc lacks precision for large inputs CALL_SUBTEST_4(test_cuda_erfc<double>(0.01)); CALL_SUBTEST_4(test_cuda_erfc<double>(0.001)); CALL_SUBTEST_5(test_cuda_digamma<float>()); CALL_SUBTEST_5(test_cuda_digamma<double>()); CALL_SUBTEST_5(test_cuda_polygamma<float>()); CALL_SUBTEST_5(test_cuda_polygamma<double>()); CALL_SUBTEST_5(test_cuda_zeta<float>()); CALL_SUBTEST_5(test_cuda_zeta<double>()); CALL_SUBTEST_5(test_cuda_igamma<float>()); CALL_SUBTEST_5(test_cuda_igammac<float>()); CALL_SUBTEST_5(test_cuda_igamma<double>()); CALL_SUBTEST_5(test_cuda_igammac<double>()); CALL_SUBTEST_6(test_cuda_betainc<float>()); CALL_SUBTEST_6(test_cuda_betainc<double>()); #endif }
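Every test in the cuda_content above follows the same shape: allocate device buffers with cudaMalloc, copy the inputs up, wrap the raw pointers in Eigen::TensorMap, assign a tensor expression through .device(gpu_device) so it is evaluated by a kernel on the CudaStreamDevice's stream, copy the result back with cudaMemcpyAsync on that same stream, synchronize, and verify on the host. A minimal sketch of that pattern, using only the Eigen and CUDA calls the file itself uses (the helper name is hypothetical):

#define EIGEN_USE_GPU
#include <unsupported/Eigen/CXX11/Tensor>

// sketch_gpu_expression is a hypothetical helper, not part of the test file above.
void sketch_gpu_expression() {
  Eigen::Tensor<float, 1> in(64), out(64);
  in.setRandom();
  const std::size_t bytes = in.size() * sizeof(float);

  float* d_in;
  float* d_out;
  cudaMalloc((void**)(&d_in), bytes);
  cudaMalloc((void**)(&d_out), bytes);
  cudaMemcpy(d_in, in.data(), bytes, cudaMemcpyHostToDevice);

  Eigen::CudaStreamDevice stream;        // owns the CUDA stream the kernels run on
  Eigen::GpuDevice gpu_device(&stream);  // evaluation target for tensor expressions
  Eigen::TensorMap<Eigen::Tensor<float, 1> > gpu_in(d_in, 64);
  Eigen::TensorMap<Eigen::Tensor<float, 1> > gpu_out(d_out, 64);

  // Evaluated on the GPU; the launch happens on gpu_device.stream().
  gpu_out.device(gpu_device) = gpu_in + gpu_in.constant(3.14f);

  cudaMemcpyAsync(out.data(), d_out, bytes, cudaMemcpyDeviceToHost,
                  gpu_device.stream());
  cudaStreamSynchronize(gpu_device.stream());

  cudaFree(d_in);
  cudaFree(d_out);
}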
a3490734a07eec4e3e4e1228138a73b134b3bd5f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,int var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float* var_26,float* var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33) { if (comp >= log10f((var_1 - var_2))) { if (comp < -1.2261E19f + coshf(var_3 * +0.0f * (var_4 * (-1.4165E-25f * var_5)))) { comp += var_7 - (var_8 + var_9 - (-0.0f * -1.2062E23f + +1.4310E-41f)); comp += var_10 + var_11 + (-1.0789E-35f + var_12 - +1.3540E35f - +1.2578E-36f); if (comp > -1.3189E36f + +1.2720E36f - (var_13 * (-1.6585E36f / var_14 - var_15))) { comp += var_16 - (var_17 * (var_18 * acosf((+0.0f / var_19 - -1.2988E-22f / var_20)))); } if (comp <= (var_21 * -1.9488E-29f * -1.2776E36f)) { comp = -0.0f * sinf(+1.4719E26f); float tmp_1 = logf(+1.3238E-44f); comp = tmp_1 + +1.3141E35f + (var_22 / (var_23 - -0.0f + (var_24 * var_25))); } for (int i=0; i < var_6; ++i) { var_26[i] = +0.0f; float tmp_2 = +1.1754E-17f; var_27[i] = (var_28 - log10f(var_29 - (-1.3263E-36f - (var_30 / +1.1926E-42f)))); comp = var_27[i] / tmp_2 * var_26[i] / (var_31 - (+1.1612E-36f / (var_32 * var_33 + (-1.0845E-36f * -1.6945E-44f)))); } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); float tmp_2 = atof(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); int tmp_7 = atoi(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); float tmp_25 = atof(argv[25]); float tmp_26 = atof(argv[26]); float* tmp_27 = initPointer( atof(argv[27]) ); float* tmp_28 = initPointer( atof(argv[28]) ); float tmp_29 = atof(argv[29]); float tmp_30 = atof(argv[30]); float tmp_31 = atof(argv[31]); float tmp_32 = atof(argv[32]); float tmp_33 = atof(argv[33]); float tmp_34 = atof(argv[34]); hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34); hipDeviceSynchronize(); return 0; }
a3490734a07eec4e3e4e1228138a73b134b3bd5f.cu
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,int var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float* var_26,float* var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33) { if (comp >= log10f((var_1 - var_2))) { if (comp < -1.2261E19f + coshf(var_3 * +0.0f * (var_4 * (-1.4165E-25f * var_5)))) { comp += var_7 - (var_8 + var_9 - (-0.0f * -1.2062E23f + +1.4310E-41f)); comp += var_10 + var_11 + (-1.0789E-35f + var_12 - +1.3540E35f - +1.2578E-36f); if (comp > -1.3189E36f + +1.2720E36f - (var_13 * (-1.6585E36f / var_14 - var_15))) { comp += var_16 - (var_17 * (var_18 * acosf((+0.0f / var_19 - -1.2988E-22f / var_20)))); } if (comp <= (var_21 * -1.9488E-29f * -1.2776E36f)) { comp = -0.0f * sinf(+1.4719E26f); float tmp_1 = logf(+1.3238E-44f); comp = tmp_1 + +1.3141E35f + (var_22 / (var_23 - -0.0f + (var_24 * var_25))); } for (int i=0; i < var_6; ++i) { var_26[i] = +0.0f; float tmp_2 = +1.1754E-17f; var_27[i] = (var_28 - log10f(var_29 - (-1.3263E-36f - (var_30 / +1.1926E-42f)))); comp = var_27[i] / tmp_2 * var_26[i] / (var_31 - (+1.1612E-36f / (var_32 * var_33 + (-1.0845E-36f * -1.6945E-44f)))); } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); float tmp_2 = atof(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); int tmp_7 = atoi(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); float tmp_25 = atof(argv[25]); float tmp_26 = atof(argv[26]); float* tmp_27 = initPointer( atof(argv[27]) ); float* tmp_28 = initPointer( atof(argv[28]) ); float tmp_29 = atof(argv[29]); float tmp_30 = atof(argv[30]); float tmp_31 = atof(argv[31]); float tmp_32 = atof(argv[32]); float tmp_33 = atof(argv[33]); float tmp_34 = atof(argv[34]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34); cudaDeviceSynchronize(); return 0; }
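The .hip and .cu rows above differ only by the mechanical rewrites hipify applies: the added "hip/hip_runtime.h" include and banner comment, cuda* runtime calls renamed to their hip* counterparts, and the triple-chevron kernel launch replaced by hipLaunchKernelGGL, whose leading arguments carry the grid, block, shared-memory bytes and stream that the chevrons used to hold. A short sketch of that mapping; the mapping itself is taken from the two rows, while the kernel and launch geometry are placeholders:

#include <cuda_runtime.h>

// Placeholder kernel, used only to show the launch-syntax translation.
__global__ void scale(float* data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= 2.0f;
}

void launch_both_ways(float* d_data, int n) {
  dim3 grid((n + 255) / 256);
  dim3 block(256);

  // CUDA form, as in the *.cu content:
  scale<<<grid, block, 0, 0>>>(d_data, n);
  cudaDeviceSynchronize();

  // HIP form emitted by hipify, as in the *.hip content:
  //   hipLaunchKernelGGL((scale), grid, block, 0, 0, d_data, n);
  //   hipDeviceSynchronize();
}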
0457961f645a2851ff6460a1ae1a46cb8f6b7647.hip
// !!! This is a file automatically generated by hipify!!! #include "MyMatrix.h" #include "kernel.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" using namespace std; // this amazingly nice error checking function is stolen from: //https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); // note, the GPU memory will not be leaked: https://stackoverflow.com/questions/52651392/cuda-does-cudamallocmanaged-deal-with-exits if (abort) exit(code); } } // multiplies two matrices and returns a new matrix // input two mymatrix objects // output mymatrix MyMatrix MyMatrix::CUDAMatMatMultiply(MyMatrix *Mat1, MyMatrix *Mat2) { // create cuda events for timing float elapsedTimeExecution; hipEvent_t startExec, stopExec; hipEventCreate(&startExec); hipEventCreate(&stopExec); // various paramaters we need dim3 dimBlock(BLOCKSIZE, BLOCKSIZE, 1); int Arows = Mat1->rows; int Acols = Mat1->cols; int Bcols = Mat2->cols; // BLOCKSIZE in MyMatrix.h, currently 32 // TODO: fix the case where this blows up? double threadsPerBlock = ceil( (double) Bcols / BLOCKSIZE); if(threadsPerBlock > 1024) { cout << "THIS WILL NOT WORK! threadsPerBlock exceeds maximum" << endl << flush; // note, the GPU memory will not be leaked: https://stackoverflow.com/questions/52651392/cuda-does-cudamallocmanaged-deal-with-exits exit(1); } double blocksPerGrid = ceil((double) Bcols/BLOCKSIZE); dim3 dimGrid(threadsPerBlock, blocksPerGrid); // Output matrix MyMatrix OutputMat(Arows, Bcols, Mat1->padr, Mat2->padc); // ready to preform a kernel; record that this even is happeneing gpuErrchk(hipEventRecord(startExec, 0)); // CUDA KERNEL CALL hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, OutputMat.data, Mat1->data, Mat2->data, Arows, Acols, Bcols); // hipDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch. gpuErrchk(hipDeviceSynchronize()); // find the time the execution took gpuErrchk(hipEventRecord(stopExec, 0)); gpuErrchk(hipEventSynchronize(stopExec)); gpuErrchk(hipEventElapsedTime(&elapsedTimeExecution, startExec, stopExec)); cout << "Using Cuda Timers, the total kernel execution time was " << elapsedTimeExecution << "ms" << endl; return OutputMat; } // Raise a Matrix to a power using CUDA // intput, mymatrix, int times // output, mymatrix // NOTE: This function does not yet use the newly found cutesy gpuerrchck, because it has memory that must be freed (a temp matrix) and we cannot just exit. 
MyMatrix MyMatrix::CUDAMatPower(MyMatrix *Mat1, int TIMES) { // create cuda events for timing hipError_t cudaStatus; float elapsedTimeExecution; hipEvent_t startExec, stopExec; hipEventCreate(&startExec); hipEventCreate(&stopExec); // block and thread size stuff dim3 dimBlock(BLOCKSIZE, BLOCKSIZE, 1); int width = Mat1->rows; dim3 dimGrid(ceil((double) width/BLOCKSIZE), ceil((double) width/BLOCKSIZE)); int matsize = width*width*sizeof(double); // GPU device pointers double *GPUTempMat; // Output matrix MyMatrix OutputMat(width, width, Mat1->padr, Mat1->padc); // pointers to matrix data elements double *outmatptr = OutputMat.data; // pass this on all subsequent squares // ready to preform a kernel; record that this even is happeneing cudaStatus = hipEventRecord(startExec, 0); if (cudaStatus != hipSuccess){ fprintf(stderr, "event record failure!"); goto Error;} // Allocate GPU buffers for three vectors in GPU global Memory cudaStatus = hipMalloc((void**)&GPUTempMat, matsize); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!");goto Error;} // keep squaring until the total number is greater than the orginal number asked for for (double T = 2; T <= TIMES; T *=2) { // on the first time pass in the matrix data so it can be squared if (T == 2) hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, outmatptr, Mat1->data, Mat1->data, width, width, width); else { cudaStatus = hipMemcpy(GPUTempMat, outmatptr, matsize, hipMemcpyDeviceToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, outmatptr, GPUTempMat, GPUTempMat, width, width, width); } // hipDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) {fprintf(stderr, "hipDeviceSynchronize returned error code %d\n", cudaStatus); goto Error;} } // find the time the execution took hipEventRecord(stopExec, 0); hipEventSynchronize(stopExec); if (cudaStatus != hipSuccess) {fprintf(stderr, "event record failure!"); goto Error; } cudaStatus = hipEventElapsedTime(&elapsedTimeExecution, startExec, stopExec); if (cudaStatus != hipSuccess) {fprintf(stderr, "hipEventElapsedTime returned error code %d!\n", cudaStatus); goto Error;} cout << "Using Cuda Timers, the total kernel execution time was " << elapsedTimeExecution << "ms" << endl; Error: // either we have errd, or the program finished natrually. // either way, free all device memory useage! hipEventDestroy(startExec); hipEventDestroy(stopExec); hipFree(GPUTempMat); return OutputMat; } __global__ void addKernel(double *c, const double *a, const double *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } // multiplies two matrices and outputs a new matrix // input: two mymatrix data doubles // output none really, mymatrix output double __global__ void MatrixMulKernel(double *OutMat, double *Mat1, double *Mat2, int Arows, int Acols, int Bcols) { // row and column within submatrix int blockrow = blockIdx.y;//* int row = threadIdx.y; int blockcol = blockIdx.x; int col = threadIdx.x ; // allocate these arrays only once we can change the values in them later __shared__ double subAshared[BLOCKSIZE*BLOCKSIZE]; __shared__ double subBshared[BLOCKSIZE*BLOCKSIZE]; double Cvalue=0; for (int B = 0; B < ceil((double)(Acols / BLOCKSIZE)) + 1; B++) { // fetch from global memory // yes, these took a LONG time to figure out. Pencil and Paper FTW! 
/* notice: 1) how these indexes are actually offset a multiple of B, *not 1*. 2) threads are offset by col which will be 1 apart for each thread 3) which means that means all threads in the warp are hitting successive global memory cells */ int Mat1index = (row + blockrow*BLOCKSIZE)*Acols + col + B*BLOCKSIZE; int Mat2index = (B*BLOCKSIZE + row)*Bcols + BLOCKSIZE*blockcol + col; if (Mat1index < Arows*Acols) subAshared[row*BLOCKSIZE + col] = Mat1[Mat1index]; else subAshared[row*BLOCKSIZE + col] = 0; if (Mat2index < Acols*Bcols) subBshared[row*BLOCKSIZE + col] = Mat2[Mat2index]; else subBshared[row*BLOCKSIZE + col] = 0; __syncthreads(); // this computation is all using shared memory (fast) for (int j = 0; j < BLOCKSIZE; j++) if ((row*BLOCKSIZE + j < BLOCKSIZE*BLOCKSIZE) && (j*BLOCKSIZE + col < BLOCKSIZE*BLOCKSIZE)) Cvalue += subAshared[row*BLOCKSIZE + j]*subBshared[j*BLOCKSIZE + col]; __syncthreads(); } if ((row < Arows) && (col < Bcols)) { int finalmatrow = blockrow*BLOCKSIZE + row; int finalmatcol = blockcol*BLOCKSIZE + col; OutMat[finalmatrow*Bcols + finalmatcol] = Cvalue; } }
0457961f645a2851ff6460a1ae1a46cb8f6b7647.cu
#include "MyMatrix.h" #include "kernel.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" using namespace std; // this amazingly nice error checking function is stolen from: //https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); // note, the GPU memory will not be leaked: https://stackoverflow.com/questions/52651392/cuda-does-cudamallocmanaged-deal-with-exits if (abort) exit(code); } } // multiplies two matrices and returns a new matrix // input two mymatrix objects // output mymatrix MyMatrix MyMatrix::CUDAMatMatMultiply(MyMatrix *Mat1, MyMatrix *Mat2) { // create cuda events for timing float elapsedTimeExecution; cudaEvent_t startExec, stopExec; cudaEventCreate(&startExec); cudaEventCreate(&stopExec); // various paramaters we need dim3 dimBlock(BLOCKSIZE, BLOCKSIZE, 1); int Arows = Mat1->rows; int Acols = Mat1->cols; int Bcols = Mat2->cols; // BLOCKSIZE in MyMatrix.h, currently 32 // TODO: fix the case where this blows up? double threadsPerBlock = ceil( (double) Bcols / BLOCKSIZE); if(threadsPerBlock > 1024) { cout << "THIS WILL NOT WORK! threadsPerBlock exceeds maximum" << endl << flush; // note, the GPU memory will not be leaked: https://stackoverflow.com/questions/52651392/cuda-does-cudamallocmanaged-deal-with-exits exit(1); } double blocksPerGrid = ceil((double) Bcols/BLOCKSIZE); dim3 dimGrid(threadsPerBlock, blocksPerGrid); // Output matrix MyMatrix OutputMat(Arows, Bcols, Mat1->padr, Mat2->padc); // ready to preform a kernel; record that this even is happeneing gpuErrchk(cudaEventRecord(startExec, 0)); // CUDA KERNEL CALL MatrixMulKernel<<< dimGrid, dimBlock>>>(OutputMat.data, Mat1->data, Mat2->data, Arows, Acols, Bcols); // cudaDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch. gpuErrchk(cudaDeviceSynchronize()); // find the time the execution took gpuErrchk(cudaEventRecord(stopExec, 0)); gpuErrchk(cudaEventSynchronize(stopExec)); gpuErrchk(cudaEventElapsedTime(&elapsedTimeExecution, startExec, stopExec)); cout << "Using Cuda Timers, the total kernel execution time was " << elapsedTimeExecution << "ms" << endl; return OutputMat; } // Raise a Matrix to a power using CUDA // intput, mymatrix, int times // output, mymatrix // NOTE: This function does not yet use the newly found cutesy gpuerrchck, because it has memory that must be freed (a temp matrix) and we cannot just exit. 
MyMatrix MyMatrix::CUDAMatPower(MyMatrix *Mat1, int TIMES) { // create cuda events for timing cudaError_t cudaStatus; float elapsedTimeExecution; cudaEvent_t startExec, stopExec; cudaEventCreate(&startExec); cudaEventCreate(&stopExec); // block and thread size stuff dim3 dimBlock(BLOCKSIZE, BLOCKSIZE, 1); int width = Mat1->rows; dim3 dimGrid(ceil((double) width/BLOCKSIZE), ceil((double) width/BLOCKSIZE)); int matsize = width*width*sizeof(double); // GPU device pointers double *GPUTempMat; // Output matrix MyMatrix OutputMat(width, width, Mat1->padr, Mat1->padc); // pointers to matrix data elements double *outmatptr = OutputMat.data; // pass this on all subsequent squares // ready to preform a kernel; record that this even is happeneing cudaStatus = cudaEventRecord(startExec, 0); if (cudaStatus != cudaSuccess){ fprintf(stderr, "event record failure!"); goto Error;} // Allocate GPU buffers for three vectors in GPU global Memory cudaStatus = cudaMalloc((void**)&GPUTempMat, matsize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!");goto Error;} // keep squaring until the total number is greater than the orginal number asked for for (double T = 2; T <= TIMES; T *=2) { // on the first time pass in the matrix data so it can be squared if (T == 2) MatrixMulKernel<<< dimGrid, dimBlock>>>(outmatptr, Mat1->data, Mat1->data, width, width, width); else { cudaStatus = cudaMemcpy(GPUTempMat, outmatptr, matsize, cudaMemcpyDeviceToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } MatrixMulKernel<<< dimGrid, dimBlock>>>(outmatptr, GPUTempMat, GPUTempMat, width, width, width); } // cudaDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) {fprintf(stderr, "cudaDeviceSynchronize returned error code %d\n", cudaStatus); goto Error;} } // find the time the execution took cudaEventRecord(stopExec, 0); cudaEventSynchronize(stopExec); if (cudaStatus != cudaSuccess) {fprintf(stderr, "event record failure!"); goto Error; } cudaStatus = cudaEventElapsedTime(&elapsedTimeExecution, startExec, stopExec); if (cudaStatus != cudaSuccess) {fprintf(stderr, "cudaEventElapsedTime returned error code %d!\n", cudaStatus); goto Error;} cout << "Using Cuda Timers, the total kernel execution time was " << elapsedTimeExecution << "ms" << endl; Error: // either we have errd, or the program finished natrually. // either way, free all device memory useage! cudaEventDestroy(startExec); cudaEventDestroy(stopExec); cudaFree(GPUTempMat); return OutputMat; } __global__ void addKernel(double *c, const double *a, const double *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } // multiplies two matrices and outputs a new matrix // input: two mymatrix data doubles // output none really, mymatrix output double __global__ void MatrixMulKernel(double *OutMat, double *Mat1, double *Mat2, int Arows, int Acols, int Bcols) { // row and column within submatrix int blockrow = blockIdx.y;//* int row = threadIdx.y; int blockcol = blockIdx.x; int col = threadIdx.x ; // allocate these arrays only once we can change the values in them later __shared__ double subAshared[BLOCKSIZE*BLOCKSIZE]; __shared__ double subBshared[BLOCKSIZE*BLOCKSIZE]; double Cvalue=0; for (int B = 0; B < ceil((double)(Acols / BLOCKSIZE)) + 1; B++) { // fetch from global memory // yes, these took a LONG time to figure out. Pencil and Paper FTW! 
/* notice: 1) how these indexes are actually offset a multiple of B, *not 1*. 2) threads are offset by col which will be 1 apart for each thread 3) which means that means all threads in the warp are hitting successive global memory cells */ int Mat1index = (row + blockrow*BLOCKSIZE)*Acols + col + B*BLOCKSIZE; int Mat2index = (B*BLOCKSIZE + row)*Bcols + BLOCKSIZE*blockcol + col; if (Mat1index < Arows*Acols) subAshared[row*BLOCKSIZE + col] = Mat1[Mat1index]; else subAshared[row*BLOCKSIZE + col] = 0; if (Mat2index < Acols*Bcols) subBshared[row*BLOCKSIZE + col] = Mat2[Mat2index]; else subBshared[row*BLOCKSIZE + col] = 0; __syncthreads(); // this computation is all using shared memory (fast) for (int j = 0; j < BLOCKSIZE; j++) if ((row*BLOCKSIZE + j < BLOCKSIZE*BLOCKSIZE) && (j*BLOCKSIZE + col < BLOCKSIZE*BLOCKSIZE)) Cvalue += subAshared[row*BLOCKSIZE + j]*subBshared[j*BLOCKSIZE + col]; __syncthreads(); } if ((row < Arows) && (col < Bcols)) { int finalmatrow = blockrow*BLOCKSIZE + row; int finalmatcol = blockcol*BLOCKSIZE + col; OutMat[finalmatrow*Bcols + finalmatcol] = Cvalue; } }
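A minimal standalone sketch of how the MatrixMulKernel above can be driven outside the MyMatrix class, assuming unified memory via cudaMallocManaged and the same BLOCKSIZE of 32; the matrix sizes and the check value are illustrative only and not taken from the class.

#include <cstdio>
#include <cuda_runtime.h>

#define BLOCKSIZE 32   // assumed to match the constant in MyMatrix.h

__global__ void MatrixMulKernel(double *OutMat, double *Mat1, double *Mat2,
                                int Arows, int Acols, int Bcols);

int main()
{
    const int Arows = 100, Acols = 64, Bcols = 80;   // deliberately non-square
    double *A, *B, *C;
    cudaMallocManaged(&A, Arows * Acols * sizeof(double));
    cudaMallocManaged(&B, Acols * Bcols * sizeof(double));
    cudaMallocManaged(&C, Arows * Bcols * sizeof(double));
    for (int i = 0; i < Arows * Acols; i++) A[i] = 1.0;
    for (int i = 0; i < Acols * Bcols; i++) B[i] = 2.0;

    dim3 dimBlock(BLOCKSIZE, BLOCKSIZE, 1);
    dim3 dimGrid((Bcols + BLOCKSIZE - 1) / BLOCKSIZE,    // x covers the columns of B
                 (Arows + BLOCKSIZE - 1) / BLOCKSIZE);   // y covers the rows of A
    MatrixMulKernel<<<dimGrid, dimBlock>>>(C, A, B, Arows, Acols, Bcols);
    cudaDeviceSynchronize();

    // every output entry should be Acols * 1.0 * 2.0
    printf("C[0] = %f (expected %f)\n", C[0], 2.0 * Acols);
    cudaFree(A); cudaFree(B); cudaFree(C);
    return 0;
}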
73734c437e9e4bd9fcad0c3332b13b4be68b5e63.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <cstdlib> #include <ctime> #define NUM_POINTS 33554432 // 1 GB of 32-bit floats float cpu_dataset[NUM_POINTS]; float cpu2_dataset[NUM_POINTS]; __global__ void hitAtomic(float* where) { atomicAdd(where, 1.0); } __global__ void hitAtomicBy32(float* where) { atomicAdd(&where[threadIdx.x % 32], 1.0); } __global__ void hitAtomicThreadLocal(float* where) { atomicAdd(&where[threadIdx.x], 1.0); } __global__ void hitNaiveThreadLocal(float* where) { where[threadIdx.x] += 1.0; } __global__ void inplaceOperation(float* data) { int id = threadIdx.x + blockIdx.x * blockDim.x; data[id] = 1.0 - data[id]; } __global__ void immutableOperation(float* datain, float* dataout) { int id = threadIdx.x + blockIdx.x * blockDim.x; dataout[id] = 1.0 - datain[id]; } __global__ void constImmutableOperation(const float* datain, float* dataout) { int id = threadIdx.x + blockIdx.x * blockDim.x; dataout[id] = 1.0 - datain[id]; } int main(int argc, char** argv) { srand(12345); for (int i = 0; i < NUM_POINTS; i++) cpu_dataset[i] = ((float)rand()) / RAND_MAX; struct hipDeviceProp_t cdp; hipGetDeviceProperties(&cdp, 0); std::cout << "Device at 0:" << std::endl; std::cout << " name: " << cdp.name << std::endl; std::cout << " totalGlobalMem: " << cdp.totalGlobalMem / 1024.0 / 1024.0 / 1024.0 << " GB" << std::endl; std::cout << " sharedMemPerBlock: " << cdp.sharedMemPerBlock / 1024.0 << " kB" << std::endl; std::cout << " regsPerBlock: " << cdp.regsPerBlock << std::endl; std::cout << " warpSize: " << cdp.warpSize << std::endl; std::cout << " memPitch: " << cdp.memPitch / 1024.0 / 1024.0 / 1024.0 << " GB" << std::endl; std::cout << " maxThreadsPerBlock: " << cdp.maxThreadsPerBlock << std::endl; std::cout << " maxThreadsDim: " << cdp.maxThreadsDim[0] << " " << cdp.maxThreadsDim[1] << " " << cdp.maxThreadsDim[2] << " " << std::endl; std::cout << " maxGridSize: " << cdp.maxGridSize[0] << " " << cdp.maxGridSize[1] << " " << cdp.maxGridSize[2] << " " << std::endl; std::cout << " totalConstMem: " << cdp.totalConstMem / 1024.0 << " kB" << std::endl; std::cout << " version: " << cdp.major << "." << cdp.minor << std::endl; std::cout << " clockRate: " << cdp.clockRate / 1000.0 << " MHz" << std::endl; std::cout << " textureAlignment: " << cdp.textureAlignment << std::endl; std::cout << " deviceOverlap: " << (cdp.deviceOverlap ? "true" : "false") << std::endl; std::cout << " multiProcessorCount: " << cdp.multiProcessorCount << std::endl; std::cout << " kernelExecTimeoutEnabled: " << (cdp.kernelExecTimeoutEnabled ? "true" : "false") << std::endl; std::cout << " integrated: " << (cdp.integrated ? "true" : "false") << std::endl; std::cout << " canMapHostMemory: " << (cdp.canMapHostMemory ? "true" : "false") << std::endl; std::cout << " computeMode: " << (cdp.computeMode == hipComputeModeDefault ? "hipComputeModeDefault" : (cdp.computeMode == hipComputeModeExclusive ? "hipComputeModeExclusive" : (cdp.computeMode == hipComputeModeProhibited ? "hipComputeModeProhibited" : "unknown"))) << std::endl; std::cout << " concurrentKernels: " << (cdp.concurrentKernels ? "true" : "false") << std::endl; std::cout << " ECCEnabled: " << (cdp.ECCEnabled ? "true" : "false") << std::endl; std::cout << " pciBusID: " << cdp.pciBusID << std::endl; std::cout << " pciDeviceID: " << cdp.pciDeviceID << std::endl; std::cout << " tccDriver: " << (cdp.tccDriver ? 
"true" : "false") << std::endl; std::cout << std::endl; for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); memcpy(cpu2_dataset, cpu_dataset, NUM_POINTS * 4); std::cout << "1 GB host -> host: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } float* gpu_dataset; float* gpu2_dataset; hipMalloc((void**)&gpu_dataset, NUM_POINTS * 4); hipMalloc((void**)&gpu2_dataset, NUM_POINTS * 4); std::cout << "check " << cpu_dataset[0] << " " << cpu_dataset[1] << " " << cpu_dataset[2] << std::endl; for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hipMemcpy(gpu_dataset, cpu_dataset, NUM_POINTS * 4, hipMemcpyHostToDevice); hipDeviceSynchronize(); std::cout << "1 GB host -> device: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hipLaunchKernelGGL(( inplaceOperation), dim3(NUM_POINTS / cdp.maxThreadsPerBlock), dim3(cdp.maxThreadsPerBlock), 0, 0, gpu_dataset); hipDeviceSynchronize(); std::cout << "1 GB device in-place operation: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hipLaunchKernelGGL(( immutableOperation), dim3(NUM_POINTS / cdp.maxThreadsPerBlock), dim3(cdp.maxThreadsPerBlock), 0, 0, gpu_dataset, gpu2_dataset); hipDeviceSynchronize(); std::cout << "1 GB device immutable operation: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hipLaunchKernelGGL(( constImmutableOperation), dim3(NUM_POINTS / cdp.maxThreadsPerBlock), dim3(cdp.maxThreadsPerBlock), 0, 0, gpu_dataset, gpu2_dataset); hipDeviceSynchronize(); std::cout << "1 GB device const immutable operation: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hipMemcpy(cpu_dataset, gpu_dataset, NUM_POINTS * 4, hipMemcpyDeviceToHost); hipDeviceSynchronize(); std::cout << "1 GB device -> host: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } std::cout << "check " << cpu_dataset[0] << " " << cpu_dataset[1] << " " << cpu_dataset[2] << std::endl; float* pinned_dataset; hipHostMalloc((void**)&pinned_dataset, NUM_POINTS * 4); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); memcpy(pinned_dataset, cpu_dataset, NUM_POINTS * 4); std::cout << "1 GB host -> pinned: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } float* mapped_dataset; hipHostGetDevicePointer((void**)&mapped_dataset, (void*)pinned_dataset, 0); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hipLaunchKernelGGL(( inplaceOperation), dim3(NUM_POINTS / cdp.maxThreadsPerBlock), dim3(cdp.maxThreadsPerBlock), 0, 0, mapped_dataset); hipDeviceSynchronize(); std::cout << "1 GB device in-place operation: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hipLaunchKernelGGL(( immutableOperation), dim3(NUM_POINTS / cdp.maxThreadsPerBlock), dim3(cdp.maxThreadsPerBlock), 0, 0, mapped_dataset, gpu2_dataset); hipDeviceSynchronize(); std::cout << "1 GB device immutable operation: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } for (int i = 0; i < 5; 
i++) { std::clock_t startTime = std::clock(); hipLaunchKernelGGL(( constImmutableOperation), dim3(NUM_POINTS / cdp.maxThreadsPerBlock), dim3(cdp.maxThreadsPerBlock), 0, 0, mapped_dataset, gpu2_dataset); hipDeviceSynchronize(); std::cout << "1 GB device const immutable operation: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); memcpy(cpu_dataset, pinned_dataset, NUM_POINTS * 4); std::cout << "1 GB pinned -> host: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } std::cout << "check " << cpu_dataset[0] << " " << cpu_dataset[1] << " " << cpu_dataset[2] << std::endl; for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hipMemcpy(gpu2_dataset, gpu_dataset, NUM_POINTS * 4, hipMemcpyDeviceToDevice); hipDeviceSynchronize(); std::cout << "1 GB device -> device: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } const float atomic_init[32] = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}; float* global_atomics; hipMalloc((void**)&global_atomics, 32 * 4); float cpu_atomics[32]; hipMemcpy(global_atomics, atomic_init, 32 * 4, hipMemcpyHostToDevice); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hipLaunchKernelGGL(( hitAtomic), dim3(32768), dim3(32), 0, 0, global_atomics); hipDeviceSynchronize(); std::cout << "hit atomics on global <<<32768, 32>>>: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } hipMemcpy(cpu_atomics, global_atomics, 32 * 4, hipMemcpyDeviceToHost); std::cout << "check for " << 32768 * 32 * 5 << ": "; for (int i = 0; i < 32; i++) std::cout << cpu_atomics[i] << " "; std::cout << std::endl; hipMemcpy(global_atomics, atomic_init, 32 * 4, hipMemcpyHostToDevice); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hipLaunchKernelGGL(( hitAtomic), dim3(32768), dim3(64), 0, 0, global_atomics); hipDeviceSynchronize(); std::cout << "hit atomics on global <<<32768, 64>>>: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } hipMemcpy(cpu_atomics, global_atomics, 32 * 4, hipMemcpyDeviceToHost); std::cout << "check for " << 32768 * 64 * 5 << ": "; for (int i = 0; i < 32; i++) std::cout << cpu_atomics[i] << " "; std::cout << std::endl; hipMemcpy(global_atomics, atomic_init, 32 * 4, hipMemcpyHostToDevice); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hipLaunchKernelGGL(( hitAtomic), dim3(32768), dim3(cdp.maxThreadsPerBlock), 0, 0, global_atomics); hipDeviceSynchronize(); std::cout << "hit atomics on global <<<32768, " << cdp.maxThreadsPerBlock << ">>>: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } hipMemcpy(cpu_atomics, global_atomics, 32 * 4, hipMemcpyDeviceToHost); std::cout << "check for " << 32768 * cdp.maxThreadsPerBlock * 5 << ": "; for (int i = 0; i < 32; i++) std::cout << cpu_atomics[i] << " "; std::cout << std::endl; hipMemcpy(global_atomics, atomic_init, 32 * 4, hipMemcpyHostToDevice); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hipLaunchKernelGGL(( hitAtomicBy32), dim3(32768), dim3(32), 0, 0, global_atomics); hipDeviceSynchronize(); std::cout << "hit atomics by 32 on global <<<32768, 32>>>: " << 1e3 * (std::clock() - startTime) / (double) 
CLOCKS_PER_SEC << " ms" << std::endl; } hipMemcpy(cpu_atomics, global_atomics, 32 * 4, hipMemcpyDeviceToHost); std::cout << "check for " << 32768 * 32 * 5 / 32 << ": "; for (int i = 0; i < 32; i++) std::cout << cpu_atomics[i] << " "; std::cout << std::endl; hipMemcpy(global_atomics, atomic_init, 32 * 4, hipMemcpyHostToDevice); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hipLaunchKernelGGL(( hitAtomicBy32), dim3(32768), dim3(64), 0, 0, global_atomics); hipDeviceSynchronize(); std::cout << "hit atomics by 32 on global <<<32768, 64>>>: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } hipMemcpy(cpu_atomics, global_atomics, 32 * 4, hipMemcpyDeviceToHost); std::cout << "check for " << 32768 * 64 * 5 / 32 << ": "; for (int i = 0; i < 32; i++) std::cout << cpu_atomics[i] << " "; std::cout << std::endl; hipMemcpy(global_atomics, atomic_init, 32 * 4, hipMemcpyHostToDevice); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hipLaunchKernelGGL(( hitAtomicBy32), dim3(32768), dim3(cdp.maxThreadsPerBlock), 0, 0, global_atomics); hipDeviceSynchronize(); std::cout << "hit atomics by 32 on global <<<32768, " << cdp.maxThreadsPerBlock << ">>>: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } hipMemcpy(cpu_atomics, global_atomics, 32 * 4, hipMemcpyDeviceToHost); std::cout << "check for " << 32768 * cdp.maxThreadsPerBlock * 5 / 32 << ": "; for (int i = 0; i < 32; i++) std::cout << cpu_atomics[i] << " "; std::cout << std::endl; float* atomic_init2 = (float*)malloc(cdp.maxThreadsPerBlock * 4); float* global_atomics2; hipMalloc((void**)&global_atomics2, cdp.maxThreadsPerBlock * 4); float* cpu_atomics2 = new float[cdp.maxThreadsPerBlock]; for (int i = 0; i < cdp.maxThreadsPerBlock; i++) atomic_init2[i] = 0.0; hipMemcpy(global_atomics2, atomic_init2, cdp.maxThreadsPerBlock * 4, hipMemcpyHostToDevice); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hipLaunchKernelGGL(( hitNaiveThreadLocal), dim3(32768), dim3(cdp.maxThreadsPerBlock), 0, 0, global_atomics2); hipDeviceSynchronize(); std::cout << "hit naive thread local by " << cdp.maxThreadsPerBlock << " on global <<<32768, " << cdp.maxThreadsPerBlock << ">>>: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } hipMemcpy(cpu_atomics2, global_atomics2, cdp.maxThreadsPerBlock * 4, hipMemcpyDeviceToHost); std::cout << "check for " << 32768 * 5 << ": "; for (int i = 0; i < cdp.maxThreadsPerBlock; i++) std::cout << cpu_atomics2[i] << " "; std::cout << std::endl; for (int i = 0; i < cdp.maxThreadsPerBlock; i++) atomic_init2[i] = 0.0; hipMemcpy(global_atomics2, atomic_init2, cdp.maxThreadsPerBlock * 4, hipMemcpyHostToDevice); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hipLaunchKernelGGL(( hitAtomicThreadLocal), dim3(32768), dim3(cdp.maxThreadsPerBlock), 0, 0, global_atomics2); hipDeviceSynchronize(); std::cout << "hit atomic thread local by " << cdp.maxThreadsPerBlock << " on global <<<32768, " << cdp.maxThreadsPerBlock << ">>>: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } hipMemcpy(cpu_atomics2, global_atomics2, cdp.maxThreadsPerBlock * 4, hipMemcpyDeviceToHost); std::cout << "check for " << 32768 * 5 << ": "; for (int i = 0; i < cdp.maxThreadsPerBlock; i++) std::cout << cpu_atomics2[i] << " "; std::cout << std::endl; return 0; }
73734c437e9e4bd9fcad0c3332b13b4be68b5e63.cu
#include <iostream> #include <cstdlib> #include <ctime> #define NUM_POINTS 33554432 // 1 GB of 32-bit floats float cpu_dataset[NUM_POINTS]; float cpu2_dataset[NUM_POINTS]; __global__ void hitAtomic(float* where) { atomicAdd(where, 1.0); } __global__ void hitAtomicBy32(float* where) { atomicAdd(&where[threadIdx.x % 32], 1.0); } __global__ void hitAtomicThreadLocal(float* where) { atomicAdd(&where[threadIdx.x], 1.0); } __global__ void hitNaiveThreadLocal(float* where) { where[threadIdx.x] += 1.0; } __global__ void inplaceOperation(float* data) { int id = threadIdx.x + blockIdx.x * blockDim.x; data[id] = 1.0 - data[id]; } __global__ void immutableOperation(float* datain, float* dataout) { int id = threadIdx.x + blockIdx.x * blockDim.x; dataout[id] = 1.0 - datain[id]; } __global__ void constImmutableOperation(const float* datain, float* dataout) { int id = threadIdx.x + blockIdx.x * blockDim.x; dataout[id] = 1.0 - datain[id]; } int main(int argc, char** argv) { srand(12345); for (int i = 0; i < NUM_POINTS; i++) cpu_dataset[i] = ((float)rand()) / RAND_MAX; struct cudaDeviceProp cdp; cudaGetDeviceProperties(&cdp, 0); std::cout << "Device at 0:" << std::endl; std::cout << " name: " << cdp.name << std::endl; std::cout << " totalGlobalMem: " << cdp.totalGlobalMem / 1024.0 / 1024.0 / 1024.0 << " GB" << std::endl; std::cout << " sharedMemPerBlock: " << cdp.sharedMemPerBlock / 1024.0 << " kB" << std::endl; std::cout << " regsPerBlock: " << cdp.regsPerBlock << std::endl; std::cout << " warpSize: " << cdp.warpSize << std::endl; std::cout << " memPitch: " << cdp.memPitch / 1024.0 / 1024.0 / 1024.0 << " GB" << std::endl; std::cout << " maxThreadsPerBlock: " << cdp.maxThreadsPerBlock << std::endl; std::cout << " maxThreadsDim: " << cdp.maxThreadsDim[0] << " " << cdp.maxThreadsDim[1] << " " << cdp.maxThreadsDim[2] << " " << std::endl; std::cout << " maxGridSize: " << cdp.maxGridSize[0] << " " << cdp.maxGridSize[1] << " " << cdp.maxGridSize[2] << " " << std::endl; std::cout << " totalConstMem: " << cdp.totalConstMem / 1024.0 << " kB" << std::endl; std::cout << " version: " << cdp.major << "." << cdp.minor << std::endl; std::cout << " clockRate: " << cdp.clockRate / 1000.0 << " MHz" << std::endl; std::cout << " textureAlignment: " << cdp.textureAlignment << std::endl; std::cout << " deviceOverlap: " << (cdp.deviceOverlap ? "true" : "false") << std::endl; std::cout << " multiProcessorCount: " << cdp.multiProcessorCount << std::endl; std::cout << " kernelExecTimeoutEnabled: " << (cdp.kernelExecTimeoutEnabled ? "true" : "false") << std::endl; std::cout << " integrated: " << (cdp.integrated ? "true" : "false") << std::endl; std::cout << " canMapHostMemory: " << (cdp.canMapHostMemory ? "true" : "false") << std::endl; std::cout << " computeMode: " << (cdp.computeMode == cudaComputeModeDefault ? "cudaComputeModeDefault" : (cdp.computeMode == cudaComputeModeExclusive ? "cudaComputeModeExclusive" : (cdp.computeMode == cudaComputeModeProhibited ? "cudaComputeModeProhibited" : "unknown"))) << std::endl; std::cout << " concurrentKernels: " << (cdp.concurrentKernels ? "true" : "false") << std::endl; std::cout << " ECCEnabled: " << (cdp.ECCEnabled ? "true" : "false") << std::endl; std::cout << " pciBusID: " << cdp.pciBusID << std::endl; std::cout << " pciDeviceID: " << cdp.pciDeviceID << std::endl; std::cout << " tccDriver: " << (cdp.tccDriver ? 
"true" : "false") << std::endl; std::cout << std::endl; for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); memcpy(cpu2_dataset, cpu_dataset, NUM_POINTS * 4); std::cout << "1 GB host -> host: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } float* gpu_dataset; float* gpu2_dataset; cudaMalloc((void**)&gpu_dataset, NUM_POINTS * 4); cudaMalloc((void**)&gpu2_dataset, NUM_POINTS * 4); std::cout << "check " << cpu_dataset[0] << " " << cpu_dataset[1] << " " << cpu_dataset[2] << std::endl; for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); cudaMemcpy(gpu_dataset, cpu_dataset, NUM_POINTS * 4, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); std::cout << "1 GB host -> device: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); inplaceOperation<<<NUM_POINTS / cdp.maxThreadsPerBlock, cdp.maxThreadsPerBlock>>>(gpu_dataset); cudaDeviceSynchronize(); std::cout << "1 GB device in-place operation: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); immutableOperation<<<NUM_POINTS / cdp.maxThreadsPerBlock, cdp.maxThreadsPerBlock>>>(gpu_dataset, gpu2_dataset); cudaDeviceSynchronize(); std::cout << "1 GB device immutable operation: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); constImmutableOperation<<<NUM_POINTS / cdp.maxThreadsPerBlock, cdp.maxThreadsPerBlock>>>(gpu_dataset, gpu2_dataset); cudaDeviceSynchronize(); std::cout << "1 GB device const immutable operation: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); cudaMemcpy(cpu_dataset, gpu_dataset, NUM_POINTS * 4, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); std::cout << "1 GB device -> host: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } std::cout << "check " << cpu_dataset[0] << " " << cpu_dataset[1] << " " << cpu_dataset[2] << std::endl; float* pinned_dataset; cudaMallocHost((void**)&pinned_dataset, NUM_POINTS * 4); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); memcpy(pinned_dataset, cpu_dataset, NUM_POINTS * 4); std::cout << "1 GB host -> pinned: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } float* mapped_dataset; cudaHostGetDevicePointer((void**)&mapped_dataset, (void*)pinned_dataset, 0); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); inplaceOperation<<<NUM_POINTS / cdp.maxThreadsPerBlock, cdp.maxThreadsPerBlock>>>(mapped_dataset); cudaDeviceSynchronize(); std::cout << "1 GB device in-place operation: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); immutableOperation<<<NUM_POINTS / cdp.maxThreadsPerBlock, cdp.maxThreadsPerBlock>>>(mapped_dataset, gpu2_dataset); cudaDeviceSynchronize(); std::cout << "1 GB device immutable operation: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); constImmutableOperation<<<NUM_POINTS / cdp.maxThreadsPerBlock, cdp.maxThreadsPerBlock>>>(mapped_dataset, gpu2_dataset); 
cudaDeviceSynchronize(); std::cout << "1 GB device const immutable operation: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); memcpy(cpu_dataset, pinned_dataset, NUM_POINTS * 4); std::cout << "1 GB pinned -> host: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } std::cout << "check " << cpu_dataset[0] << " " << cpu_dataset[1] << " " << cpu_dataset[2] << std::endl; for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); cudaMemcpy(gpu2_dataset, gpu_dataset, NUM_POINTS * 4, cudaMemcpyDeviceToDevice); cudaDeviceSynchronize(); std::cout << "1 GB device -> device: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } const float atomic_init[32] = {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}; float* global_atomics; cudaMalloc((void**)&global_atomics, 32 * 4); float cpu_atomics[32]; cudaMemcpy(global_atomics, atomic_init, 32 * 4, cudaMemcpyHostToDevice); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hitAtomic<<<32768, 32>>>(global_atomics); cudaDeviceSynchronize(); std::cout << "hit atomics on global <<<32768, 32>>>: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } cudaMemcpy(cpu_atomics, global_atomics, 32 * 4, cudaMemcpyDeviceToHost); std::cout << "check for " << 32768 * 32 * 5 << ": "; for (int i = 0; i < 32; i++) std::cout << cpu_atomics[i] << " "; std::cout << std::endl; cudaMemcpy(global_atomics, atomic_init, 32 * 4, cudaMemcpyHostToDevice); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hitAtomic<<<32768, 64>>>(global_atomics); cudaDeviceSynchronize(); std::cout << "hit atomics on global <<<32768, 64>>>: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } cudaMemcpy(cpu_atomics, global_atomics, 32 * 4, cudaMemcpyDeviceToHost); std::cout << "check for " << 32768 * 64 * 5 << ": "; for (int i = 0; i < 32; i++) std::cout << cpu_atomics[i] << " "; std::cout << std::endl; cudaMemcpy(global_atomics, atomic_init, 32 * 4, cudaMemcpyHostToDevice); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hitAtomic<<<32768, cdp.maxThreadsPerBlock>>>(global_atomics); cudaDeviceSynchronize(); std::cout << "hit atomics on global <<<32768, " << cdp.maxThreadsPerBlock << ">>>: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } cudaMemcpy(cpu_atomics, global_atomics, 32 * 4, cudaMemcpyDeviceToHost); std::cout << "check for " << 32768 * cdp.maxThreadsPerBlock * 5 << ": "; for (int i = 0; i < 32; i++) std::cout << cpu_atomics[i] << " "; std::cout << std::endl; cudaMemcpy(global_atomics, atomic_init, 32 * 4, cudaMemcpyHostToDevice); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hitAtomicBy32<<<32768, 32>>>(global_atomics); cudaDeviceSynchronize(); std::cout << "hit atomics by 32 on global <<<32768, 32>>>: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } cudaMemcpy(cpu_atomics, global_atomics, 32 * 4, cudaMemcpyDeviceToHost); std::cout << "check for " << 32768 * 32 * 5 / 32 << ": "; for (int i = 0; i < 32; i++) std::cout << cpu_atomics[i] << " "; std::cout << std::endl; cudaMemcpy(global_atomics, atomic_init, 32 * 4, cudaMemcpyHostToDevice); 
for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hitAtomicBy32<<<32768, 64>>>(global_atomics); cudaDeviceSynchronize(); std::cout << "hit atomics by 32 on global <<<32768, 64>>>: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } cudaMemcpy(cpu_atomics, global_atomics, 32 * 4, cudaMemcpyDeviceToHost); std::cout << "check for " << 32768 * 64 * 5 / 32 << ": "; for (int i = 0; i < 32; i++) std::cout << cpu_atomics[i] << " "; std::cout << std::endl; cudaMemcpy(global_atomics, atomic_init, 32 * 4, cudaMemcpyHostToDevice); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hitAtomicBy32<<<32768, cdp.maxThreadsPerBlock>>>(global_atomics); cudaDeviceSynchronize(); std::cout << "hit atomics by 32 on global <<<32768, " << cdp.maxThreadsPerBlock << ">>>: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } cudaMemcpy(cpu_atomics, global_atomics, 32 * 4, cudaMemcpyDeviceToHost); std::cout << "check for " << 32768 * cdp.maxThreadsPerBlock * 5 / 32 << ": "; for (int i = 0; i < 32; i++) std::cout << cpu_atomics[i] << " "; std::cout << std::endl; float* atomic_init2 = (float*)malloc(cdp.maxThreadsPerBlock * 4); float* global_atomics2; cudaMalloc((void**)&global_atomics2, cdp.maxThreadsPerBlock * 4); float* cpu_atomics2 = new float[cdp.maxThreadsPerBlock]; for (int i = 0; i < cdp.maxThreadsPerBlock; i++) atomic_init2[i] = 0.0; cudaMemcpy(global_atomics2, atomic_init2, cdp.maxThreadsPerBlock * 4, cudaMemcpyHostToDevice); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hitNaiveThreadLocal<<<32768, cdp.maxThreadsPerBlock>>>(global_atomics2); cudaDeviceSynchronize(); std::cout << "hit naive thread local by " << cdp.maxThreadsPerBlock << " on global <<<32768, " << cdp.maxThreadsPerBlock << ">>>: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } cudaMemcpy(cpu_atomics2, global_atomics2, cdp.maxThreadsPerBlock * 4, cudaMemcpyDeviceToHost); std::cout << "check for " << 32768 * 5 << ": "; for (int i = 0; i < cdp.maxThreadsPerBlock; i++) std::cout << cpu_atomics2[i] << " "; std::cout << std::endl; for (int i = 0; i < cdp.maxThreadsPerBlock; i++) atomic_init2[i] = 0.0; cudaMemcpy(global_atomics2, atomic_init2, cdp.maxThreadsPerBlock * 4, cudaMemcpyHostToDevice); for (int i = 0; i < 5; i++) { std::clock_t startTime = std::clock(); hitAtomicThreadLocal<<<32768, cdp.maxThreadsPerBlock>>>(global_atomics2); cudaDeviceSynchronize(); std::cout << "hit atomic thread local by " << cdp.maxThreadsPerBlock << " on global <<<32768, " << cdp.maxThreadsPerBlock << ">>>: " << 1e3 * (std::clock() - startTime) / (double) CLOCKS_PER_SEC << " ms" << std::endl; } cudaMemcpy(cpu_atomics2, global_atomics2, cdp.maxThreadsPerBlock * 4, cudaMemcpyDeviceToHost); std::cout << "check for " << 32768 * 5 << ": "; for (int i = 0; i < cdp.maxThreadsPerBlock; i++) std::cout << cpu_atomics2[i] << " "; std::cout << std::endl; return 0; }
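The hitAtomic benchmarks above make every thread hammer the same global counter. A common way to cut that contention, sketched below under the assumption that blocks are launched with a multiple of 32 threads (true for every launch above), is to sum within the warp using shuffle intrinsics and let only lane 0 issue the atomicAdd; the kernel name is hypothetical and not part of the benchmark.

#include <cuda_runtime.h>

// Warp-aggregated variant of hitAtomic: one atomicAdd per warp instead of one per thread.
__global__ void hitAtomicWarpAggregated(float* where) {
    float val = 1.0f;   // this thread's contribution, same as hitAtomic
    // tree reduction across the 32 lanes of the warp (all lanes active, so the full mask is safe)
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffffu, val, offset);
    // lane 0 now holds the warp's total
    if ((threadIdx.x & 31) == 0)
        atomicAdd(where, val);
}

// e.g., timed the same way as the other kernels:
//   hitAtomicWarpAggregated<<<32768, cdp.maxThreadsPerBlock>>>(global_atomics);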
7f889aad4464a2643ac69763158055c5a07b9c68.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////// // // EXAMPLE OF HIERARCHICAL THREE PHASE PREFIX-SCAN CHAPTER 8 // Efficient_Kogge_Stone_scan: // Using this three-phase approach, we can use a much smaller number of // threads then the number of the elements in a section. The maximal size // of a section is no longer limited by the number of threads in the block // but rather, the size of shared memory; all elements in a section // must to fit into the shared memory. // //////////////////////////////////////////////////////////////////////////// // With 8192 elements using float numbers there are approximation problems //////////////////////////////////////////////////////////////////////////// #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #define SECTION_SIZE 4096 #define BLOCK_DIM 1024 #define SUBSECTION_SIZE SECTION_SIZE / BLOCK_DIM hipError_t efficient_Kogge_Stone_scan(float *X, float *Y, unsigned int size, float *msTime); void sequential_scan(float *x, float *y, int Max_i); void print_Array(float *A, int size); int verify_result(float *Y, float *YS, int size); __device__ void print_Array_device(float *A, int size) { for (int i = 0; i < size; i++) { printf("A[%d] = %.2f\n", i, A[i]); } printf("\n\n"); } //////////////////////////////////////////////////////////////////////////////// //! Efficient prefix sum //! @param X input data in global memory //! @param Y output data in global memory //! @param InputSize size of input and output data //////////////////////////////////////////////////////////////////////////////// __global__ void efficient_Kogge_Stone_scan_kernel(float *X, float *Y, int InputSize) { __shared__ float XY[SECTION_SIZE]; __shared__ float AUS[BLOCK_DIM]; //int i = blockIdx.x * blockDim.x + threadIdx.x; // Keep mind: Partition the input into blockDim.x subsections: i.e. 
for 8 threads --> 8 subsections // collaborative load in a coalesced manner for (int j = 0; j < SECTION_SIZE; j += blockDim.x) { XY[threadIdx.x + j] = X[threadIdx.x + j]; } __syncthreads(); // PHASE 1: scan inner own subsection // At the end of this phase the last element of each subsection contains the sum of all alements in own subsection for (int j = 1; j < SUBSECTION_SIZE; j++) { XY[threadIdx.x * (SUBSECTION_SIZE)+j] += XY[threadIdx.x * (SUBSECTION_SIZE)+j - 1]; } __syncthreads(); // PHASE 2: perform iterative kogge_stone_scan of the last elements of each subsections of XY loaded first in AUS AUS[threadIdx.x] = XY[threadIdx.x * (SUBSECTION_SIZE)+(SUBSECTION_SIZE)-1]; float in; for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); if (threadIdx.x >= stride) { in = AUS[threadIdx.x - stride]; } __syncthreads(); if (threadIdx.x >= stride) { AUS[threadIdx.x] += in; } } __syncthreads(); // PHASE 3: each thread adds to its elements the new value of the last element of its predecessor's section if (threadIdx.x > 0) { for (unsigned int stride = 0; stride < (SUBSECTION_SIZE); stride++) { XY[threadIdx.x * (SUBSECTION_SIZE)+stride] += AUS[threadIdx.x - 1]; // <-- } } __syncthreads(); // store the result into output vector for (int j = 0; j < SECTION_SIZE; j += blockDim.x) { Y[threadIdx.x + j] = XY[threadIdx.x + j]; } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main() { const int arraySize = 4096; float *Y, *YS, *X; //float X[arraySize] = { 2,1,3,1,0,4,1,2,0,3,1,2,5,3,1,2 }; float msTime, msTime_seq; hipEvent_t startTimeCuda, stopTimeCuda; hipEventCreate(&startTimeCuda); hipEventCreate(&stopTimeCuda); X = (float*)malloc(arraySize * sizeof(float)); Y = (float*)malloc(arraySize * sizeof(float)); YS = (float*)malloc(arraySize * sizeof(float)); //fill input vector for (int i = 0; i < arraySize; i++) { X[i] = (float)(i + 1.0); } //printf("Array input:"); //print_Array(X, arraySize); // ---------------------- PERFORM SEQUENTIAL SCAN ---------------- printf("Sequential scan...\n"); hipEventRecord(startTimeCuda, 0); hipEventSynchronize(startTimeCuda); sequential_scan(X, YS, arraySize); hipEventRecord(stopTimeCuda, 0); hipEventSynchronize(stopTimeCuda); hipEventElapsedTime(&msTime_seq, startTimeCuda, stopTimeCuda); printf("HostTime: %f\n\n", msTime_seq); //print_Array(YS, arraySize); //printf(" OK!\n"); // ---------------------- PERFORM PARALELL SCAN ------------------ printf("Parallel scan...\n"); hipError_t cudaStatus = efficient_Kogge_Stone_scan(X, Y, arraySize, &msTime); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); goto Error; } //print_Array(Y, arraySize); //printf(" OK!\n"); // ------------------ verify the result. ----------------------------------- if (verify_result(Y, YS, arraySize)) { goto Error; } printf("TEST PASSED!\n"); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); goto Error; } printf("Speedup: %f\n", msTime_seq / msTime); free(X); free(Y); free(YS); #ifdef WIN32 system("pause"); #endif // WIN32 return 0; Error: free(X); free(Y); free(YS); #ifdef WIN32 system("pause"); #endif // WIN32 return 1; } // Helper function for using CUDA to perform scan in parallel. 
hipError_t efficient_Kogge_Stone_scan(float *X, float *Y, unsigned int size, float *msTime)
{
	float *dev_X, *dev_Y;
	hipError_t cudaStatus;
	hipEvent_t startTimeCuda, stopTimeCuda;
	hipEventCreate(&startTimeCuda);
	hipEventCreate(&stopTimeCuda);

	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = hipSetDevice(0);
	if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; }

	// Allocate GPU buffers for two vectors.
	cudaStatus = hipMalloc((void**)&dev_X, size * sizeof(float));
	if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; }

	cudaStatus = hipMalloc((void**)&dev_Y, size * sizeof(float));
	if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; }

	// Copy input vector from host memory to GPU buffers.
	cudaStatus = hipMemcpy(dev_X, X, size * sizeof(float), hipMemcpyHostToDevice);
	if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; }

	// Launch a kernel on the GPU with BLOCK_DIM threads per block.
	hipEventRecord(startTimeCuda, 0);
	hipEventSynchronize(startTimeCuda);
	efficient_Kogge_Stone_scan_kernel << <1, BLOCK_DIM >> > (dev_X, dev_Y, size);
	hipEventRecord(stopTimeCuda, 0);
	hipEventSynchronize(stopTimeCuda);
	hipEventElapsedTime(msTime, startTimeCuda, stopTimeCuda);
	printf("KernelTime: %f\n\n", *msTime);

	// Check for any errors launching the kernel
	cudaStatus = hipGetLastError();
	if (cudaStatus != hipSuccess) { fprintf(stderr, "efficient_Kogge_Stone_scan_kernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; }

	// hipDeviceSynchronize waits for the kernel to finish, and returns
	// any errors encountered during the launch.
	cudaStatus = hipDeviceSynchronize();
	if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching efficient_Kogge_Stone_scan_kernel Kernel!\n", cudaStatus); goto Error; }

	// Copy output vector from GPU buffer to host memory.
	cudaStatus = hipMemcpy(Y, dev_Y, size * sizeof(float), hipMemcpyDeviceToHost);
	if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; }

Error:
	hipFree(dev_X);
	hipFree(dev_Y);

	return cudaStatus;
}

void sequential_scan(float *x, float *y, int Max_i)
{
	float accumulator = x[0];
	y[0] = accumulator;
	for (int i = 1; i < Max_i; i++) {
		accumulator += x[i];
		y[i] = accumulator;
	}
}

void print_Array(float *A, int size)
{
	for (int i = 0; i < size; i++) {
		printf("%.2f ", A[i]);
	}
	printf("\n\n");
}

int verify_result(float *Y, float *YS, int size)
{
	for (int i = 0; i < size; i++) {
		// compare with a relative tolerance on the absolute difference: the prefix sums
		// grow large and the single-precision results legitimately differ in the last bits
		if (fabsf(Y[i] - YS[i]) > 1e-5f * fabsf(YS[i])) {
			printf("Error Y[%d] = %.2f != %.2f = YS[%d]\n", i, Y[i], YS[i], i);
			return 1;
		}
	}
	return 0;
}
7f889aad4464a2643ac69763158055c5a07b9c68.cu
//////////////////////////////////////////////////////////////////////////// // // EXAMPLE OF HIERARCHICAL THREE PHASE PREFIX-SCAN CHAPTER 8 // Efficient_Kogge_Stone_scan: // Using this three-phase approach, we can use a much smaller number of // threads then the number of the elements in a section. The maximal size // of a section is no longer limited by the number of threads in the block // but rather, the size of shared memory; all elements in a section // must to fit into the shared memory. // //////////////////////////////////////////////////////////////////////////// // With 8192 elements using float numbers there are approximation problems //////////////////////////////////////////////////////////////////////////// #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #define SECTION_SIZE 4096 #define BLOCK_DIM 1024 #define SUBSECTION_SIZE SECTION_SIZE / BLOCK_DIM cudaError_t efficient_Kogge_Stone_scan(float *X, float *Y, unsigned int size, float *msTime); void sequential_scan(float *x, float *y, int Max_i); void print_Array(float *A, int size); int verify_result(float *Y, float *YS, int size); __device__ void print_Array_device(float *A, int size) { for (int i = 0; i < size; i++) { printf("A[%d] = %.2f\n", i, A[i]); } printf("\n\n"); } //////////////////////////////////////////////////////////////////////////////// //! Efficient prefix sum //! @param X input data in global memory //! @param Y output data in global memory //! @param InputSize size of input and output data //////////////////////////////////////////////////////////////////////////////// __global__ void efficient_Kogge_Stone_scan_kernel(float *X, float *Y, int InputSize) { __shared__ float XY[SECTION_SIZE]; __shared__ float AUS[BLOCK_DIM]; //int i = blockIdx.x * blockDim.x + threadIdx.x; // Keep mind: Partition the input into blockDim.x subsections: i.e. 
for 8 threads --> 8 subsections // collaborative load in a coalesced manner for (int j = 0; j < SECTION_SIZE; j += blockDim.x) { XY[threadIdx.x + j] = X[threadIdx.x + j]; } __syncthreads(); // PHASE 1: scan inner own subsection // At the end of this phase the last element of each subsection contains the sum of all alements in own subsection for (int j = 1; j < SUBSECTION_SIZE; j++) { XY[threadIdx.x * (SUBSECTION_SIZE)+j] += XY[threadIdx.x * (SUBSECTION_SIZE)+j - 1]; } __syncthreads(); // PHASE 2: perform iterative kogge_stone_scan of the last elements of each subsections of XY loaded first in AUS AUS[threadIdx.x] = XY[threadIdx.x * (SUBSECTION_SIZE)+(SUBSECTION_SIZE)-1]; float in; for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); if (threadIdx.x >= stride) { in = AUS[threadIdx.x - stride]; } __syncthreads(); if (threadIdx.x >= stride) { AUS[threadIdx.x] += in; } } __syncthreads(); // PHASE 3: each thread adds to its elements the new value of the last element of its predecessor's section if (threadIdx.x > 0) { for (unsigned int stride = 0; stride < (SUBSECTION_SIZE); stride++) { XY[threadIdx.x * (SUBSECTION_SIZE)+stride] += AUS[threadIdx.x - 1]; // <-- } } __syncthreads(); // store the result into output vector for (int j = 0; j < SECTION_SIZE; j += blockDim.x) { Y[threadIdx.x + j] = XY[threadIdx.x + j]; } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main() { const int arraySize = 4096; float *Y, *YS, *X; //float X[arraySize] = { 2,1,3,1,0,4,1,2,0,3,1,2,5,3,1,2 }; float msTime, msTime_seq; cudaEvent_t startTimeCuda, stopTimeCuda; cudaEventCreate(&startTimeCuda); cudaEventCreate(&stopTimeCuda); X = (float*)malloc(arraySize * sizeof(float)); Y = (float*)malloc(arraySize * sizeof(float)); YS = (float*)malloc(arraySize * sizeof(float)); //fill input vector for (int i = 0; i < arraySize; i++) { X[i] = (float)(i + 1.0); } //printf("Array input:"); //print_Array(X, arraySize); // ---------------------- PERFORM SEQUENTIAL SCAN ---------------- printf("Sequential scan...\n"); cudaEventRecord(startTimeCuda, 0); cudaEventSynchronize(startTimeCuda); sequential_scan(X, YS, arraySize); cudaEventRecord(stopTimeCuda, 0); cudaEventSynchronize(stopTimeCuda); cudaEventElapsedTime(&msTime_seq, startTimeCuda, stopTimeCuda); printf("HostTime: %f\n\n", msTime_seq); //print_Array(YS, arraySize); //printf(" OK!\n"); // ---------------------- PERFORM PARALELL SCAN ------------------ printf("Parallel scan...\n"); cudaError_t cudaStatus = efficient_Kogge_Stone_scan(X, Y, arraySize, &msTime); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); goto Error; } //print_Array(Y, arraySize); //printf(" OK!\n"); // ------------------ verify the result. ----------------------------------- if (verify_result(Y, YS, arraySize)) { goto Error; } printf("TEST PASSED!\n"); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); goto Error; } printf("Speedup: %f\n", msTime_seq / msTime); free(X); free(Y); free(YS); #ifdef WIN32 system("pause"); #endif // WIN32 return 0; Error: free(X); free(Y); free(YS); #ifdef WIN32 system("pause"); #endif // WIN32 return 1; } // Helper function for using CUDA to perform scan in parallel. 
cudaError_t efficient_Kogge_Stone_scan(float *X, float *Y, unsigned int size, float *msTime)
{
	float *dev_X, *dev_Y;
	cudaError_t cudaStatus;
	cudaEvent_t startTimeCuda, stopTimeCuda;
	cudaEventCreate(&startTimeCuda);
	cudaEventCreate(&stopTimeCuda);

	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; }

	// Allocate GPU buffers for two vectors.
	cudaStatus = cudaMalloc((void**)&dev_X, size * sizeof(float));
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; }

	cudaStatus = cudaMalloc((void**)&dev_Y, size * sizeof(float));
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; }

	// Copy input vector from host memory to GPU buffers.
	cudaStatus = cudaMemcpy(dev_X, X, size * sizeof(float), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; }

	// Launch a kernel on the GPU with BLOCK_DIM threads per block.
	cudaEventRecord(startTimeCuda, 0);
	cudaEventSynchronize(startTimeCuda);
	efficient_Kogge_Stone_scan_kernel << <1, BLOCK_DIM >> > (dev_X, dev_Y, size);
	cudaEventRecord(stopTimeCuda, 0);
	cudaEventSynchronize(stopTimeCuda);
	cudaEventElapsedTime(msTime, startTimeCuda, stopTimeCuda);
	printf("KernelTime: %f\n\n", *msTime);

	// Check for any errors launching the kernel
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "efficient_Kogge_Stone_scan_kernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; }

	// cudaDeviceSynchronize waits for the kernel to finish, and returns
	// any errors encountered during the launch.
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching efficient_Kogge_Stone_scan_kernel Kernel!\n", cudaStatus); goto Error; }

	// Copy output vector from GPU buffer to host memory.
	cudaStatus = cudaMemcpy(Y, dev_Y, size * sizeof(float), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; }

Error:
	cudaFree(dev_X);
	cudaFree(dev_Y);

	return cudaStatus;
}

void sequential_scan(float *x, float *y, int Max_i)
{
	float accumulator = x[0];
	y[0] = accumulator;
	for (int i = 1; i < Max_i; i++) {
		accumulator += x[i];
		y[i] = accumulator;
	}
}

void print_Array(float *A, int size)
{
	for (int i = 0; i < size; i++) {
		printf("%.2f ", A[i]);
	}
	printf("\n\n");
}

int verify_result(float *Y, float *YS, int size)
{
	for (int i = 0; i < size; i++) {
		// compare with a relative tolerance on the absolute difference: the prefix sums
		// grow large and the single-precision results legitimately differ in the last bits
		if (fabsf(Y[i] - YS[i]) > 1e-5f * fabsf(YS[i])) {
			printf("Error Y[%d] = %.2f != %.2f = YS[%d]\n", i, Y[i], YS[i], i);
			return 1;
		}
	}
	return 0;
}
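As an independent cross-check of verify_result, the scan can also be compared against Thrust's inclusive_scan, which ships with the CUDA toolkit. The sketch below is not part of the program above; the helper name and tolerance are assumptions.

#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <math.h>
#include <stdio.h>

// Hypothetical helper: recompute the inclusive scan with Thrust and compare against Y.
int verify_against_thrust(const float *X, const float *Y, int n)
{
	thrust::device_vector<float> d_in(X, X + n);        // copy the input to the device
	thrust::device_vector<float> d_out(n);
	thrust::inclusive_scan(d_in.begin(), d_in.end(), d_out.begin());
	thrust::host_vector<float> ref = d_out;             // copy the reference result back
	for (int i = 0; i < n; i++) {
		if (fabsf(Y[i] - ref[i]) > 1e-5f * fabsf(ref[i])) {
			printf("Mismatch at %d: %.2f (kernel) vs %.2f (thrust)\n", i, Y[i], ref[i]);
			return 1;
		}
	}
	return 0;
}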
d6615a67a59460978a9e68a7821373dff34b77b2.hip
// !!! This is a file automatically generated by hipify!!! #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) // generated by gen_cutlass_matrix_mul_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #pragma GCC diagnostic ignored "-Wuninitialized" #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl" using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using ThreadBlockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>; using Gemm = cutlass::gemm::device::Gemm< float, LayoutA, float, LayoutB, float, cutlass::layout::RowMajor, float, cutlass::arch::OpClassSimt, cutlass::arch::Sm50, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>( const typename Gemm::ElementA* d_A, size_t lda, const typename Gemm::ElementB* d_B, size_t ldb, typename Gemm::ElementC* d_C, size_t ldc, int* workspace, cutlass::gemm::GemmCoord const& problem_size, typename Gemm::EpilogueOutputOp::Params const& epilogue, hipStream_t stream, int split_k_slices); #pragma GCC diagnostic pop #endif
d6615a67a59460978a9e68a7821373dff34b77b2.cu
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2) // generated by gen_cutlass_matrix_mul_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #pragma GCC diagnostic ignored "-Wuninitialized" #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl" using LayoutA = cutlass::layout::RowMajor; using LayoutB = cutlass::layout::RowMajor; using ThreadBlockShape = cutlass::gemm::GemmShape<8, 32, 8>; using WarpShape = cutlass::gemm::GemmShape<8, 32, 8>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>; using Gemm = cutlass::gemm::device::Gemm< float, LayoutA, float, LayoutB, float, cutlass::layout::RowMajor, float, cutlass::arch::OpClassSimt, cutlass::arch::Sm50, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2>; template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>( const typename Gemm::ElementA* d_A, size_t lda, const typename Gemm::ElementB* d_B, size_t ldb, typename Gemm::ElementC* d_C, size_t ldc, int* workspace, cutlass::gemm::GemmCoord const& problem_size, typename Gemm::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream, int split_k_slices); #pragma GCC diagnostic pop #endif
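For reference, the generated wrapper above ultimately drives a cutlass::gemm::device::Gemm instantiation. A hedged sketch of using that device-level API directly, following the pattern of CUTLASS's basic GEMM example (the type alias and helper name here are assumptions, not part of the generated file):

#include <cutlass/gemm/device/gemm.h>

// float, row-major GEMM with the library's default SIMT configuration
using GemmFp32RowMajor = cutlass::gemm::device::Gemm<
        float, cutlass::layout::RowMajor,
        float, cutlass::layout::RowMajor,
        float, cutlass::layout::RowMajor>;

cutlass::Status run_sgemm(int M, int N, int K,
                          float alpha, const float* A, int lda,
                          const float* B, int ldb,
                          float beta, float* C, int ldc)
{
    GemmFp32RowMajor gemm_op;
    // problem size, A, B, C (source), D (destination), epilogue {alpha, beta}
    GemmFp32RowMajor::Arguments args({M, N, K},
                                     {A, lda}, {B, ldb},
                                     {C, ldc}, {C, ldc},
                                     {alpha, beta});
    return gemm_op(args);   // launches the kernel on the default stream
}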
2ca9d79fc8777bb29eda8df29228cf55607eaf81.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> void incrementArrayOnHost(float *a, int N) { int i; for (i = 0; i < N; i++) a[i] = a[i] + 1.f; } __global__ void incrementArrayOnDevice(float *a, int N) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < N) a[idx] = a[idx] + 1.f; } int main(void) { float *a_h, *b_h; // pointers to host memory float *a_d; // pointer to device memory int i, errcode, N = 10; size_t size = N * sizeof(float); // allocate arrays on host a_h = (float *) malloc(size); b_h = (float *) malloc(size); // allocate array on device errcode = hipMalloc((void **) &a_d, size); if (errcode != hipSuccess) { fprintf(stderr, "Error invoking hipMalloc (ERRCODE %d)\n", errcode); return 1; } // initialization of host data for (i = 0; i < N; i++) a_h[i] = (float) i; // copy data from host to device errcode = hipMemcpy(a_d, a_h, sizeof(float) * N, hipMemcpyHostToDevice); if (errcode != hipSuccess) { fprintf(stderr, "Error invoking hipMemcpy (ERRCODE %d)\n", errcode); return 1; } // do calculation on host incrementArrayOnHost(a_h, N); // do calculation on device: // Part 1 of 2. Compute execution configuration int blockSize = 4; int nBlocks = N/blockSize + (N%blockSize == 0 ? 0 : 1); // Part 2 of 2. Call incrementArrayOnDevice kernel hipLaunchKernelGGL(( incrementArrayOnDevice), dim3(nBlocks), dim3(blockSize), 0, 0, a_d, N); // Retrieve result from device and store in b_h errcode = hipMemcpy(b_h, a_d, sizeof(float)*N, hipMemcpyDeviceToHost); if (errcode != hipSuccess) { fprintf(stderr, "Error invoking hipMemcpy (ERRCODE %d)\n", errcode); return 1; } // check results for (i = 0; i < N; i++) assert(a_h[i] == b_h[i]); // cleanup free(a_h); free(b_h); hipFree(a_d); }
2ca9d79fc8777bb29eda8df29228cf55607eaf81.cu
#include <stdio.h> #include <assert.h> #include <cuda.h> void incrementArrayOnHost(float *a, int N) { int i; for (i = 0; i < N; i++) a[i] = a[i] + 1.f; } __global__ void incrementArrayOnDevice(float *a, int N) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < N) a[idx] = a[idx] + 1.f; } int main(void) { float *a_h, *b_h; // pointers to host memory float *a_d; // pointer to device memory int i, errcode, N = 10; size_t size = N * sizeof(float); // allocate arrays on host a_h = (float *) malloc(size); b_h = (float *) malloc(size); // allocate array on device errcode = cudaMalloc((void **) &a_d, size); if (errcode != cudaSuccess) { fprintf(stderr, "Error invoking cudaMalloc (ERRCODE %d)\n", errcode); return 1; } // initialization of host data for (i = 0; i < N; i++) a_h[i] = (float) i; // copy data from host to device errcode = cudaMemcpy(a_d, a_h, sizeof(float) * N, cudaMemcpyHostToDevice); if (errcode != cudaSuccess) { fprintf(stderr, "Error invoking cudaMemcpy (ERRCODE %d)\n", errcode); return 1; } // do calculation on host incrementArrayOnHost(a_h, N); // do calculation on device: // Part 1 of 2. Compute execution configuration int blockSize = 4; int nBlocks = N/blockSize + (N%blockSize == 0 ? 0 : 1); // Part 2 of 2. Call incrementArrayOnDevice kernel incrementArrayOnDevice<<<nBlocks, blockSize>>>(a_d, N); // Retrieve result from device and store in b_h errcode = cudaMemcpy(b_h, a_d, sizeof(float)*N, cudaMemcpyDeviceToHost); if (errcode != cudaSuccess) { fprintf(stderr, "Error invoking cudaMemcpy (ERRCODE %d)\n", errcode); return 1; } // check results for (i = 0; i < N; i++) assert(a_h[i] == b_h[i]); // cleanup free(a_h); free(b_h); cudaFree(a_d); }
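The kernel above assigns exactly one element per thread, so the host has to compute nBlocks with a ceiling division. A common alternative, sketched below with an illustrative kernel name, is a grid-stride loop, which lets any launch configuration cover all N elements.

// Grid-stride variant of incrementArrayOnDevice: each thread strides through the
// array, so the grid does not have to be sized to N exactly.
__global__ void incrementArrayGridStride(float *a, int N)
{
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < N;
         idx += blockDim.x * gridDim.x) {
        a[idx] = a[idx] + 1.f;
    }
}

// usage, reusing the variables from main above:
//   incrementArrayGridStride<<<nBlocks, blockSize>>>(a_d, N);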
8fd0f83ccf32a1c414e69b5620a48d113ae450af.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @generated s Wed Nov 14 22:53:54 2012 */ #include "common_magma.h" #define PRECISION_s #if (GPUSHMEM >= 200) #define ssymv_bs 32 #define bank_shift 33 /******************************************************************************* * Functions for each specific cases - Lower case */ __global__ void magmablas_ssymv_200_L_special_mgpu_offset_32( magma_int_t n, float alpha, float *A, magma_int_t lda, float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC, magma_int_t my_gpu_id, magma_int_t num_gpus, magma_int_t nb, magma_int_t kstan) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; if(blkc < my_gpu_id) { return; } float res = MAGMA_S_ZERO;// used in scan the row float res_ = MAGMA_S_ZERO;// used in scan the column float res1 = MAGMA_S_ZERO;// tem for res float res2 = MAGMA_S_ZERO;// tem for res_ __shared__ float la [ssymv_bs][bank_shift]; __shared__ float sdata [ssymv_bs][9]; __shared__ float buff [ssymv_bs]; __shared__ float buff2 [ssymv_bs]; magma_int_t break_d = ssymv_bs * blkc; x += (break_d + tx ) * incx; A += break_d ; A += ty * lda + tx ; if( ty == 0 ) { buff[tx] = x[0]; if(blkc == 0 && my_gpu_id == 0 && tx < kstan) { MAGMA_S_SET2REAL(buff[tx], 0.0); } } // obtain the vector x store in buff; magma_int_t flag = 0; if ( (blkc % num_gpus) == my_gpu_id) { A += lda * (blkc/num_gpus) * ssymv_bs; // change #pragma unroll for(magma_int_t j =0; j<ssymv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; __syncthreads(); #pragma unroll for(magma_int_t i=ty*4; i<(ty * 4 + 4) ; i++){ if ( i < tx ) { la[0][bank_shift * tx + i] = ( la[0][ i * bank_shift + tx] ) ; } } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += ( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; __syncthreads(); A -= lda * (blkc/num_gpus) * ssymv_bs; flag = 1; } x -= blkc * ssymv_bs *incx ; x= x- tx*incx; magma_int_t wc_c = my_gpu_id ; magma_int_t count = 0 ; WC += break_d + tx; magma_int_t num_blocks_iters = (blkc +1) /num_gpus - flag; if( my_gpu_id < ( (blkc+1) % num_gpus) ) { num_blocks_iters += 1; } x += (my_gpu_id ) * ssymv_bs ; if( blkc > my_gpu_id) for(magma_int_t s=0; s<num_blocks_iters; s++) { MAGMA_S_SET2REAL(res_,0); count++; #pragma unroll for(magma_int_t j =0; j< ssymv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if( ty == 0 ) { buff2[tx] = x[tx]; if(my_gpu_id == 0 && tx < kstan && count==1) { MAGMA_S_SET2REAL(buff2[tx], 0.0); } } // obtain the vector x store in buff2; __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += ( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; //iterate colum } sdata[tx][ty]= res_ ; __syncthreads(); if( ty== 1 ) { res2 = sdata[tx][0]+sdata[tx][1] + sdata[tx][2]+sdata[tx][3] + sdata[tx][4]+sdata[tx][5] + sdata[tx][6]+sdata[tx][7]; WC[wc_c*lda ] = res2; } wc_c += num_gpus; x += num_gpus * ssymv_bs; A += lda * ssymv_bs ; } la[0][bank_shift*tx+ty]= res ; __syncthreads(); if( ty== 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } } 
/************************************************************** * Lower case for generic sizes */ __global__ void magmablas_ssymv_200_L_generic_mgpu_offset_32(magma_int_t n, float alpha, float *A, magma_int_t lda, float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC, magma_int_t m_mod_nb, magma_int_t my_gpu_id, magma_int_t num_gpus, magma_int_t nb, magma_int_t kstan) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; if(blkc < my_gpu_id) { return; } float res = MAGMA_S_ZERO; float res_ = MAGMA_S_ZERO; float res1 = MAGMA_S_ZERO; float res2 = MAGMA_S_ZERO; __shared__ float la [ssymv_bs][bank_shift]; __shared__ float sdata [ssymv_bs][9]; __shared__ float buff [ssymv_bs]; __shared__ float buff2 [ssymv_bs]; magma_int_t break_d = ssymv_bs * blkc; x += (break_d + tx ) * incx; A += break_d ; A += lda * ty; magma_int_t trackA ; if( blkc == ( gridDim.x - 1 ) ) { if( ty == 0 ){ if( tx > m_mod_nb ) { MAGMA_S_SET2REAL(buff[tx],0); } else buff[tx] = x[0]; } if ( tx > m_mod_nb ) trackA=m_mod_nb; else trackA=tx; A += trackA ; } else { if( ty == 0 ){ buff[tx] = x[0]; } trackA = tx; A += trackA ; } if(ty == 0 ) { if(my_gpu_id == 0 && blkc ==0 && tx < kstan)// { MAGMA_S_SET2REAL(buff[tx], 0.0); } } magma_int_t flag = 0; if ( (blkc % num_gpus) == my_gpu_id) { A += lda * (blkc/num_gpus) * ssymv_bs; // change // Somehow merging these two if - else creates problem // It could be a potential bug -- from synchronization or from cuda or compiler if( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(magma_int_t j =0; j< ssymv_bs; j+=8){ if( ( ty + j ) > m_mod_nb ) { MAGMA_S_SET2REAL(la[0][bank_shift*(ty+j)+tx], 9999); } else la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } else { #pragma unroll for(magma_int_t j =0; j< ssymv_bs; j+=8){ la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(magma_int_t i=ty*4; i<(ty*4+4) ; i++){ if ( i < tx ) { la[0][bank_shift*tx+i] = (la[0][i*bank_shift+tx]) ; } } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += (la[0][bank_shift*tx+j+ty*4])* buff[j+ty*4]; __syncthreads(); A -= lda * (blkc/num_gpus) * ssymv_bs; flag = 1; } __syncthreads(); x= x - break_d *incx ; x= x - tx * incx ; magma_int_t wc_c = my_gpu_id ; magma_int_t count = 0 ; WC += break_d + tx; magma_int_t num_blocks_iters = (blkc +1) /num_gpus - flag; if( my_gpu_id < ( (blkc+1) % num_gpus) ) { num_blocks_iters += 1; } x += (my_gpu_id ) * ssymv_bs ; if( blkc > my_gpu_id) for(magma_int_t s=0; s<num_blocks_iters; s++) { MAGMA_S_SET2REAL(res_,0); count++; #pragma unroll for(magma_int_t j =0; j< ssymv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if( ty == 0 ) { buff2[tx] = x[tx]; if(my_gpu_id == 0 && tx < kstan && count==1)// { MAGMA_S_SET2REAL(buff2[tx], 0.0); } } // obtain the vector x store in buff2; __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += ( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; //iterate colum } sdata[tx][ty]= res_ ; __syncthreads(); if( ty== 1 ) { res2 = sdata[tx][0]+sdata[tx][1] + sdata[tx][2]+sdata[tx][3] + sdata[tx][4]+sdata[tx][5] + sdata[tx][6]+sdata[tx][7]; WC[wc_c*lda ] = res2; } wc_c += num_gpus; x += num_gpus * ssymv_bs; A += lda * ssymv_bs ; } la[0][bank_shift*tx+ty]= res ; __syncthreads(); if( ty== 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + 
la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } } /************************************************************** * */ __global__ void magmablas_ssymv_200_L_update_mgpu_offset_32(magma_int_t n, float alpha, float* A, magma_int_t lda, float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC, magma_int_t my_gpu_id, magma_int_t num_gpus, magma_int_t nb, magma_int_t kstan ) { magma_int_t i; magma_int_t tx = threadIdx.x ; magma_int_t ind = blockIdx.x * ssymv_bs + tx ; float Ca; MAGMA_S_SET2REAL(Ca, 0) ; WC+= ind + lda * blockIdx.x; for(i = blockIdx.x* ssymv_bs; i<n; i+= ssymv_bs){ Ca += WC[0] ; WC += ssymv_bs; } if( ind < n && ind >= kstan) y[ind * incy] = beta * y[ind * incy] + alpha * Ca ; } extern "C" void magmablas_ssymv_200_L_mgpu_offset_32(magma_int_t m, float alpha, float *A, magma_int_t lda, float *X, magma_int_t incx, float beta, float *Y, magma_int_t incy, float *dC_work, magma_int_t my_gpu_id, magma_int_t num_gpus, magma_int_t nb, magma_int_t offset, magma_int_t num_blocks_skipped) { magma_int_t the_chosen_block_id = offset / nb; magma_int_t kstan = offset % nb; A += lda * num_blocks_skipped * nb + the_chosen_block_id * nb; X += the_chosen_block_id * nb; Y += the_chosen_block_id * nb; magma_int_t blocks; if (m % ssymv_bs==0) blocks = m / ssymv_bs; else blocks = m / ssymv_bs + 1; blocks -= the_chosen_block_id; dim3 grid(blocks, 1, 1); dim3 threads(nb, 8, 1); dim3 threads_u(nb, 1, 1); /* * If matrix size is multiple of ssymv_bs, we use a specific code. * otherwise, we call the generic case. */ if(m % ssymv_bs == 0 ) { hipLaunchKernelGGL(( magmablas_ssymv_200_L_special_mgpu_offset_32) , dim3(grid), dim3(threads), 0, magma_stream , m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, my_gpu_id, num_gpus, nb, kstan); } else { magma_int_t m_mod_nb = m%ssymv_bs - 1; hipLaunchKernelGGL(( magmablas_ssymv_200_L_generic_mgpu_offset_32) , dim3(grid), dim3(threads), 0, magma_stream , m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_nb, my_gpu_id, num_gpus, nb, kstan); } hipLaunchKernelGGL(( magmablas_ssymv_200_L_update_mgpu_offset_32), dim3(grid), dim3(threads_u), 0, magma_stream , m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, my_gpu_id, num_gpus, nb, kstan); } /******************************************************************************* * Functions for each specific cases - Upper case */ __global__ void magmablas_ssymv_200_U_special_mgpu_offset_32( magma_int_t n, float alpha, float *A, magma_int_t lda, float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC, magma_int_t my_gpu_id, magma_int_t num_gpus, magma_int_t nb, magma_int_t kstan) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; float res = MAGMA_S_ZERO;// used in scan the row float res_ = MAGMA_S_ZERO;// used in scan the column float res1 = MAGMA_S_ZERO;// tem for res float res2 = MAGMA_S_ZERO;// tem for res_ __shared__ float la [ssymv_bs][bank_shift]; __shared__ float buff [ssymv_bs]; __shared__ float buff2 [ssymv_bs]; magma_int_t break_d = ssymv_bs * blkc; x += (break_d + tx ) * incx; A += break_d ; A += ty * lda + tx ; if( ty == 0 ) { buff[tx] = x[0]; if(blkc == 0 && tx < kstan) { MAGMA_S_SET2REAL(buff[tx], 0.0); } } // obtain the vector x store in buff; if ( (blkc % num_gpus) == my_gpu_id) { A += lda * (blkc/num_gpus) * ssymv_bs; // change #pragma unroll for(magma_int_t j =0; j<ssymv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; __syncthreads(); #pragma unroll for(magma_int_t i=ty*4; i<(ty * 4 
+ 4) ; i++){ if ( i > tx ) { la[0][bank_shift * tx + i] = (la[0][ i * bank_shift + tx]) ; } } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += ( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; __syncthreads(); A -= lda * (blkc/num_gpus) * ssymv_bs; } __syncthreads(); x -= (break_d + tx ) * incx;// return to the beginning x += (my_gpu_id ) * ssymv_bs ;// magma_int_t wc_c = my_gpu_id ; magma_int_t total_blocks_gpu = gridDim.x /num_gpus; if( my_gpu_id < ( gridDim.x % num_gpus) ) { total_blocks_gpu += 1; } magma_int_t shift = (blkc +1) /num_gpus ; if( my_gpu_id < ( (blkc+1) % num_gpus) ) { shift += 1; } #pragma unroll for(magma_int_t s=0; s<shift; s++) { x += num_gpus * ssymv_bs; A += lda * ssymv_bs ; wc_c += num_gpus; } WC += break_d + tx; magma_int_t num_blocks_iters = total_blocks_gpu - shift; magma_int_t count = 0; for(magma_int_t s=0; s<num_blocks_iters; s++) { MAGMA_S_SET2REAL(res_,0); count++; #pragma unroll for(magma_int_t j =0; j<ssymv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if( ty == 0 ) { buff2[tx] = x[tx]; } // obtain the vector x store in buff; __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += ( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; //iterate colum } __syncthreads(); la[0][bank_shift*tx+ty]= res_ ; __syncthreads(); if( ty== 0 ) { res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[wc_c*lda ] = res2; } __syncthreads(); wc_c += num_gpus; x += num_gpus * ssymv_bs; A += lda * ssymv_bs ; } la[0][bank_shift*tx+ty]= res ; __syncthreads(); if( ty== 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } } __global__ void magmablas_ssymv_200_U_generic_mgpu_offset_32(magma_int_t n, float alpha, float *A, magma_int_t lda, float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC, magma_int_t m_mod_thread_x, magma_int_t my_gpu_id, magma_int_t num_gpus, magma_int_t nb, magma_int_t kstan, magma_int_t the_right_gpu) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; float res = MAGMA_S_ZERO; float res_ = MAGMA_S_ZERO; float res1 = MAGMA_S_ZERO; float res2 = MAGMA_S_ZERO; __shared__ float la [ssymv_bs][bank_shift]; __shared__ float buff [ssymv_bs]; __shared__ float buff2 [ssymv_bs]; magma_int_t break_d = ssymv_bs * blkc; x += (break_d + tx ) * incx; A += break_d ; A += lda * ty; magma_int_t trackA ; if( blkc == ( gridDim.x - 1 )) { if( ty == 0 ){ if( tx > m_mod_thread_x ) { MAGMA_S_SET2REAL(buff[tx],0); } else buff[tx] = x[0]; } if ( tx > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx; A += trackA ; } else { if( ty == 0 ) { buff[tx] = x[0]; } A += tx ; } if(ty == 0 ) { if(blkc ==0 && tx < kstan)// { MAGMA_S_SET2REAL(buff[tx], 0.0); } } if ( (blkc % num_gpus) == my_gpu_id) { A += lda * (blkc/num_gpus) * ssymv_bs; // change if( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(magma_int_t j =0; j<ssymv_bs; j+=8){ if( ( ty + j ) > m_mod_thread_x ) { MAGMA_S_SET2REAL(la[0][bank_shift*(ty+j)+tx], 9999); } else la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } else { #pragma unroll for(magma_int_t j =0; j<ssymv_bs; j+=8){ la[0][bank_shift*(ty+j)+tx] = 
A[ j * lda]; } } __syncthreads(); #pragma unroll for(magma_int_t i=ty*4; i<(ty*4+4) ; i++){ if ( i > tx ) { la[0][bank_shift * tx + i] = (la[0][ i * bank_shift + tx]) ; } } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += (la[0][bank_shift*tx+j+ty*4])* buff[j+ty*4]; __syncthreads(); A -= lda * (blkc/num_gpus) * ssymv_bs; } x -= (break_d + tx ) * incx;// return to the beginning x += (my_gpu_id ) * ssymv_bs ;// magma_int_t wc_c = my_gpu_id ; magma_int_t total_blocks_gpu = gridDim.x /num_gpus; if( my_gpu_id < ( gridDim.x % num_gpus) ) { total_blocks_gpu += 1; } magma_int_t shift = (blkc +1) /num_gpus ; if( my_gpu_id < ( (blkc+1) % num_gpus) ) { shift += 1; } #pragma unroll for(magma_int_t s=0; s<shift; s++) { x += num_gpus * ssymv_bs; A += lda * ssymv_bs ; wc_c += num_gpus; } WC += break_d + tx; magma_int_t num_blocks_iters = total_blocks_gpu - shift; magma_int_t count = 0; for(magma_int_t s=0; s<num_blocks_iters; s++) { MAGMA_S_SET2REAL(res_,0); count++; if(my_gpu_id == the_right_gpu && s==num_blocks_iters-1) { if( ty == 0 ) { if( tx > m_mod_thread_x ) { MAGMA_S_SET2REAL(buff2[tx],0); } else buff2[tx] = x[tx]; } #pragma unroll for(magma_int_t j =0; j<ssymv_bs; j+=8) { if( ( ty + j ) > m_mod_thread_x ) { MAGMA_S_SET2REAL(la[0][bank_shift*(ty+j)+tx], 0); } else la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } __syncthreads(); }// end of the_right_gpu else { #pragma unroll for(magma_int_t j =0; j<ssymv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if( ty == 0 ) { buff2[tx] = x[tx]; } // obtain the vector x store in buff; __syncthreads(); } #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += ( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; //iterate colum } __syncthreads(); la[0][bank_shift*tx+ty]= res_ ; __syncthreads(); if( ty== 0 ) { res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[wc_c*lda ] = res2; } __syncthreads(); wc_c += num_gpus; x += num_gpus * ssymv_bs; A += lda * ssymv_bs ; } la[0][bank_shift*tx+ty]= res ; __syncthreads(); if( ty== 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } } __global__ void magmablas_ssymv_200_U_update_mgpu_offset_32(magma_int_t n, float alpha, float* A, magma_int_t lda, float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC, magma_int_t my_gpu_id, magma_int_t num_gpus, magma_int_t nb, magma_int_t kstan ) { magma_int_t i; magma_int_t tx = threadIdx.x ; magma_int_t ind = blockIdx.x * ssymv_bs + tx ; float Ca; MAGMA_S_SET2REAL(Ca, 0) ; WC+= blockIdx.x * lda + tx; for(i = 0; i<(blockIdx.x+1)*ssymv_bs; i+= ssymv_bs) { Ca += WC[0] ; WC += ssymv_bs ; } if( ind < n && ind >= kstan) y[ind * incy] = beta * y[ind * incy] + alpha * Ca ; } extern "C" void magmablas_ssymv_200_U_mgpu_offset_32(magma_int_t m, float alpha, float *A, magma_int_t lda, float *X, magma_int_t incx, float beta, float *Y, magma_int_t incy, float *dC_work, magma_int_t my_gpu_id, magma_int_t num_gpus, magma_int_t nb, magma_int_t offset, magma_int_t num_blocks_skipped, magma_int_t the_right_gpu) { magma_int_t the_chosen_block_id = offset / nb; magma_int_t kstan = offset % nb; A += lda * num_blocks_skipped * nb + the_chosen_block_id * 
nb; X += the_chosen_block_id * nb; Y += the_chosen_block_id * nb; magma_int_t blocks; if (m % ssymv_bs==0) blocks = m / ssymv_bs; else blocks = m / ssymv_bs + 1; blocks -= the_chosen_block_id; dim3 grid(blocks, 1, 1); dim3 threads(nb, 8, 1); dim3 threads_u(nb, 1, 1); /* * If matrix size is multiple of ssymv_bs, we use a specific code. * otherwise, we call the generic case. */ if(m % ssymv_bs == 0 ) { hipLaunchKernelGGL(( magmablas_ssymv_200_U_special_mgpu_offset_32) , dim3(grid), dim3(threads), 0, magma_stream , m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, my_gpu_id, num_gpus, nb, kstan); } else{ magma_int_t m_mod_thread_x = m%ssymv_bs - 1; hipLaunchKernelGGL(( magmablas_ssymv_200_U_generic_mgpu_offset_32) , dim3(grid), dim3(threads), 0, magma_stream , m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x, my_gpu_id, num_gpus, nb, kstan, the_right_gpu); } hipLaunchKernelGGL(( magmablas_ssymv_200_U_update_mgpu_offset_32), dim3(grid), dim3(threads_u), 0, magma_stream , m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, my_gpu_id, num_gpus, nb, kstan); } /************************************************************************* Purpose ======= magmablas_ssymv performs the matrix-vector operation on fermi: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n hermitian matrix. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - COMPLEX*16 . On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - COMPLEX*16 array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the hermitian matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the hermitian matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. It is recommended that lda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. X - COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA - COMPLEX*16 . On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Y - COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. INCY - INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. 
*/ extern "C" magma_int_t magmablas_ssymv_mgpu_32_offset( char uplo, magma_int_t n, float alpha, float **A, magma_int_t lda, float **X, magma_int_t incx, float beta, float **Y, magma_int_t incy, float **work, magma_int_t lwork, magma_int_t num_gpus, magma_int_t nb, magma_int_t offset, hipStream_t stream[][10]) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) ) return MAGMA_SUCCESS; magma_int_t blocks = n / ssymv_bs + (n % ssymv_bs != 0); magma_int_t workspace = lda * (blocks + 1); if (lwork < workspace){ printf("Not enough work space in magmablas_ssymv: passed %d, required %d\n", lwork, workspace); exit(1); } if(nb != 32) { printf("Error in magmablas_ssymv_200_mgpu: nb != 32, program will exit! please reallocate your matrix among GPUs\n"); exit(0); } magma_int_t i = 0; for(i=0; i<num_gpus; i++) { magma_setdevice(i); magmablasSetKernelStream(stream[i][0]); magma_int_t the_chosen_block_id = offset / nb; magma_int_t the_chosen_gpu_id = the_chosen_block_id % num_gpus; magma_int_t num_blocks_skipped = the_chosen_block_id / num_gpus; if(i < the_chosen_gpu_id) { num_blocks_skipped += 1; } int new_gpu_id = ( i + num_gpus - the_chosen_gpu_id ) % num_gpus; magma_int_t the_right_block_id = n / nb ; magma_int_t the_right_gpu = the_right_block_id % num_gpus; the_right_gpu = ( the_right_gpu + num_gpus - the_chosen_gpu_id ) % num_gpus; // the_right_gpu is used in Upper generic case. if ( upper) { magmablas_ssymv_200_U_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i], new_gpu_id, num_gpus, nb, offset, num_blocks_skipped, the_right_gpu); } else { magmablas_ssymv_200_L_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i], new_gpu_id, num_gpus, nb, offset, num_blocks_skipped); } } return MAGMA_SUCCESS; } extern "C" magma_int_t magmablas_ssymv2_mgpu_32_offset( char uplo, magma_int_t n, float alpha, float **A, magma_int_t lda, float **X, magma_int_t incx, float beta, float **Y, magma_int_t incy, float **work, magma_int_t lwork, magma_int_t num_gpus, magma_int_t nb, magma_int_t offset) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) ) return MAGMA_SUCCESS; magma_int_t blocks = n / ssymv_bs + (n % ssymv_bs != 0); magma_int_t workspace = lda * (blocks + 1); if (lwork < workspace){ printf("Not enough work space in magmablas_ssymv: passed %d, required %d\n", lwork, workspace); exit(1); } if(nb != 32) { printf("Error in magmablas_ssymv_200_mgpu: nb != 32, program will exit! 
please reallocate your matrix among GPUs\n"); exit(0); } magma_int_t i = 0; for(i=0; i<num_gpus; i++) { magma_setdevice(i); // magmablasSetKernelStream(stream[i][0]); magma_int_t the_chosen_block_id = offset / nb; magma_int_t the_chosen_gpu_id = the_chosen_block_id % num_gpus; magma_int_t num_blocks_skipped = the_chosen_block_id / num_gpus; if(i < the_chosen_gpu_id) { num_blocks_skipped += 1; } int new_gpu_id = ( i + num_gpus - the_chosen_gpu_id ) % num_gpus; magma_int_t the_right_block_id = n / nb ; magma_int_t the_right_gpu = the_right_block_id % num_gpus; the_right_gpu = ( the_right_gpu + num_gpus - the_chosen_gpu_id ) % num_gpus; // the_right_gpu is used in Upper generic case. if ( upper) { magmablas_ssymv_200_U_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i], new_gpu_id, num_gpus, nb, offset, num_blocks_skipped, the_right_gpu); } else magmablas_ssymv_200_L_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i], new_gpu_id, num_gpus, nb, offset, num_blocks_skipped); } return MAGMA_SUCCESS; } extern "C" magma_int_t magmablas_ssymv2_mgpu_32( char uplo, magma_int_t n, float alpha, float **A, magma_int_t lda, float **X, magma_int_t incx, float beta, float **Y, magma_int_t incy, float **work, magma_int_t lwork, magma_int_t num_gpus, magma_int_t nb) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) ) return MAGMA_SUCCESS; magma_int_t blocks = n / ssymv_bs + (n % ssymv_bs != 0); magma_int_t workspace = lda * (blocks + 1); if (lwork < workspace){ printf("Not enough work space in magmablas_ssymv: passed %d, required %d\n", lwork, workspace); exit(1); } if(nb != 32) { printf("Error in magmablas_ssymv_200_mgpu: nb != 32, program will exit! please reallocate your matrix among GPUs\n"); exit(0); } magma_int_t i = 0; for(i=0; i<num_gpus; i++) { magma_setdevice(i); magma_int_t the_right_block_id = n / nb ; magma_int_t the_right_gpu = the_right_block_id % num_gpus; // the_right_gpu is used in Upper generic case. if ( upper) { magmablas_ssymv_200_U_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i], i, num_gpus, nb, 0, 0, the_right_gpu); } else magmablas_ssymv_200_L_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i], i, num_gpus, nb, 0, 0); } return MAGMA_SUCCESS; } __global__ void kernel_fillZero(float *A, magma_int_t size) { magma_int_t id = blockIdx.x * blockDim.x + threadIdx.x; if(id < size) { MAGMA_S_SET2REAL(A[id], 0.0); } } void fillZero(float *A, magma_int_t size) { magma_int_t blocks = (size-1)/512 + 1; dim3 grid(blocks, 1, 1); dim3 threads(512, 1, 1); hipLaunchKernelGGL(( kernel_fillZero), dim3(grid), dim3(threads), 0, 0, A, size); } #endif /* (GPUSHMEM >= 200) */
8fd0f83ccf32a1c414e69b5620a48d113ae450af.cu
/* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @generated s Wed Nov 14 22:53:54 2012 */ #include "common_magma.h" #define PRECISION_s #if (GPUSHMEM >= 200) #define ssymv_bs 32 #define bank_shift 33 /******************************************************************************* * Functions for each specific cases - Lower case */ __global__ void magmablas_ssymv_200_L_special_mgpu_offset_32( magma_int_t n, float alpha, float *A, magma_int_t lda, float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC, magma_int_t my_gpu_id, magma_int_t num_gpus, magma_int_t nb, magma_int_t kstan) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; if(blkc < my_gpu_id) { return; } float res = MAGMA_S_ZERO;// used in scan the row float res_ = MAGMA_S_ZERO;// used in scan the column float res1 = MAGMA_S_ZERO;// tem for res float res2 = MAGMA_S_ZERO;// tem for res_ __shared__ float la [ssymv_bs][bank_shift]; __shared__ float sdata [ssymv_bs][9]; __shared__ float buff [ssymv_bs]; __shared__ float buff2 [ssymv_bs]; magma_int_t break_d = ssymv_bs * blkc; x += (break_d + tx ) * incx; A += break_d ; A += ty * lda + tx ; if( ty == 0 ) { buff[tx] = x[0]; if(blkc == 0 && my_gpu_id == 0 && tx < kstan) { MAGMA_S_SET2REAL(buff[tx], 0.0); } } // obtain the vector x store in buff; magma_int_t flag = 0; if ( (blkc % num_gpus) == my_gpu_id) { A += lda * (blkc/num_gpus) * ssymv_bs; // change #pragma unroll for(magma_int_t j =0; j<ssymv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; __syncthreads(); #pragma unroll for(magma_int_t i=ty*4; i<(ty * 4 + 4) ; i++){ if ( i < tx ) { la[0][bank_shift * tx + i] = ( la[0][ i * bank_shift + tx] ) ; } } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += ( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; __syncthreads(); A -= lda * (blkc/num_gpus) * ssymv_bs; flag = 1; } x -= blkc * ssymv_bs *incx ; x= x- tx*incx; magma_int_t wc_c = my_gpu_id ; magma_int_t count = 0 ; WC += break_d + tx; magma_int_t num_blocks_iters = (blkc +1) /num_gpus - flag; if( my_gpu_id < ( (blkc+1) % num_gpus) ) { num_blocks_iters += 1; } x += (my_gpu_id ) * ssymv_bs ; if( blkc > my_gpu_id) for(magma_int_t s=0; s<num_blocks_iters; s++) { MAGMA_S_SET2REAL(res_,0); count++; #pragma unroll for(magma_int_t j =0; j< ssymv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if( ty == 0 ) { buff2[tx] = x[tx]; if(my_gpu_id == 0 && tx < kstan && count==1) { MAGMA_S_SET2REAL(buff2[tx], 0.0); } } // obtain the vector x store in buff2; __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += ( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; //iterate colum } sdata[tx][ty]= res_ ; __syncthreads(); if( ty== 1 ) { res2 = sdata[tx][0]+sdata[tx][1] + sdata[tx][2]+sdata[tx][3] + sdata[tx][4]+sdata[tx][5] + sdata[tx][6]+sdata[tx][7]; WC[wc_c*lda ] = res2; } wc_c += num_gpus; x += num_gpus * ssymv_bs; A += lda * ssymv_bs ; } la[0][bank_shift*tx+ty]= res ; __syncthreads(); if( ty== 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } } /************************************************************** * Lower case for generic sizes */ __global__ void 
magmablas_ssymv_200_L_generic_mgpu_offset_32(magma_int_t n, float alpha, float *A, magma_int_t lda, float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC, magma_int_t m_mod_nb, magma_int_t my_gpu_id, magma_int_t num_gpus, magma_int_t nb, magma_int_t kstan) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; if(blkc < my_gpu_id) { return; } float res = MAGMA_S_ZERO; float res_ = MAGMA_S_ZERO; float res1 = MAGMA_S_ZERO; float res2 = MAGMA_S_ZERO; __shared__ float la [ssymv_bs][bank_shift]; __shared__ float sdata [ssymv_bs][9]; __shared__ float buff [ssymv_bs]; __shared__ float buff2 [ssymv_bs]; magma_int_t break_d = ssymv_bs * blkc; x += (break_d + tx ) * incx; A += break_d ; A += lda * ty; magma_int_t trackA ; if( blkc == ( gridDim.x - 1 ) ) { if( ty == 0 ){ if( tx > m_mod_nb ) { MAGMA_S_SET2REAL(buff[tx],0); } else buff[tx] = x[0]; } if ( tx > m_mod_nb ) trackA=m_mod_nb; else trackA=tx; A += trackA ; } else { if( ty == 0 ){ buff[tx] = x[0]; } trackA = tx; A += trackA ; } if(ty == 0 ) { if(my_gpu_id == 0 && blkc ==0 && tx < kstan)// { MAGMA_S_SET2REAL(buff[tx], 0.0); } } magma_int_t flag = 0; if ( (blkc % num_gpus) == my_gpu_id) { A += lda * (blkc/num_gpus) * ssymv_bs; // change // Somehow merging these two if - else creates problem // It could be a potential bug -- from synchronization or from cuda or compiler if( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(magma_int_t j =0; j< ssymv_bs; j+=8){ if( ( ty + j ) > m_mod_nb ) { MAGMA_S_SET2REAL(la[0][bank_shift*(ty+j)+tx], 9999); } else la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } else { #pragma unroll for(magma_int_t j =0; j< ssymv_bs; j+=8){ la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(magma_int_t i=ty*4; i<(ty*4+4) ; i++){ if ( i < tx ) { la[0][bank_shift*tx+i] = (la[0][i*bank_shift+tx]) ; } } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += (la[0][bank_shift*tx+j+ty*4])* buff[j+ty*4]; __syncthreads(); A -= lda * (blkc/num_gpus) * ssymv_bs; flag = 1; } __syncthreads(); x= x - break_d *incx ; x= x - tx * incx ; magma_int_t wc_c = my_gpu_id ; magma_int_t count = 0 ; WC += break_d + tx; magma_int_t num_blocks_iters = (blkc +1) /num_gpus - flag; if( my_gpu_id < ( (blkc+1) % num_gpus) ) { num_blocks_iters += 1; } x += (my_gpu_id ) * ssymv_bs ; if( blkc > my_gpu_id) for(magma_int_t s=0; s<num_blocks_iters; s++) { MAGMA_S_SET2REAL(res_,0); count++; #pragma unroll for(magma_int_t j =0; j< ssymv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if( ty == 0 ) { buff2[tx] = x[tx]; if(my_gpu_id == 0 && tx < kstan && count==1)// { MAGMA_S_SET2REAL(buff2[tx], 0.0); } } // obtain the vector x store in buff2; __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += ( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; //iterate colum } sdata[tx][ty]= res_ ; __syncthreads(); if( ty== 1 ) { res2 = sdata[tx][0]+sdata[tx][1] + sdata[tx][2]+sdata[tx][3] + sdata[tx][4]+sdata[tx][5] + sdata[tx][6]+sdata[tx][7]; WC[wc_c*lda ] = res2; } wc_c += num_gpus; x += num_gpus * ssymv_bs; A += lda * ssymv_bs ; } la[0][bank_shift*tx+ty]= res ; __syncthreads(); if( ty== 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } } 
/************************************************************** * */ __global__ void magmablas_ssymv_200_L_update_mgpu_offset_32(magma_int_t n, float alpha, float* A, magma_int_t lda, float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC, magma_int_t my_gpu_id, magma_int_t num_gpus, magma_int_t nb, magma_int_t kstan ) { magma_int_t i; magma_int_t tx = threadIdx.x ; magma_int_t ind = blockIdx.x * ssymv_bs + tx ; float Ca; MAGMA_S_SET2REAL(Ca, 0) ; WC+= ind + lda * blockIdx.x; for(i = blockIdx.x* ssymv_bs; i<n; i+= ssymv_bs){ Ca += WC[0] ; WC += ssymv_bs; } if( ind < n && ind >= kstan) y[ind * incy] = beta * y[ind * incy] + alpha * Ca ; } extern "C" void magmablas_ssymv_200_L_mgpu_offset_32(magma_int_t m, float alpha, float *A, magma_int_t lda, float *X, magma_int_t incx, float beta, float *Y, magma_int_t incy, float *dC_work, magma_int_t my_gpu_id, magma_int_t num_gpus, magma_int_t nb, magma_int_t offset, magma_int_t num_blocks_skipped) { magma_int_t the_chosen_block_id = offset / nb; magma_int_t kstan = offset % nb; A += lda * num_blocks_skipped * nb + the_chosen_block_id * nb; X += the_chosen_block_id * nb; Y += the_chosen_block_id * nb; magma_int_t blocks; if (m % ssymv_bs==0) blocks = m / ssymv_bs; else blocks = m / ssymv_bs + 1; blocks -= the_chosen_block_id; dim3 grid(blocks, 1, 1); dim3 threads(nb, 8, 1); dim3 threads_u(nb, 1, 1); /* * If matrix size is multiple of ssymv_bs, we use a specific code. * otherwise, we call the generic case. */ if(m % ssymv_bs == 0 ) { magmablas_ssymv_200_L_special_mgpu_offset_32 <<< grid, threads, 0, magma_stream >>>( m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, my_gpu_id, num_gpus, nb, kstan); } else { magma_int_t m_mod_nb = m%ssymv_bs - 1; magmablas_ssymv_200_L_generic_mgpu_offset_32 <<< grid, threads, 0, magma_stream >>> ( m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_nb, my_gpu_id, num_gpus, nb, kstan); } magmablas_ssymv_200_L_update_mgpu_offset_32<<< grid, threads_u, 0, magma_stream >>>( m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, my_gpu_id, num_gpus, nb, kstan); } /******************************************************************************* * Functions for each specific cases - Upper case */ __global__ void magmablas_ssymv_200_U_special_mgpu_offset_32( magma_int_t n, float alpha, float *A, magma_int_t lda, float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC, magma_int_t my_gpu_id, magma_int_t num_gpus, magma_int_t nb, magma_int_t kstan) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; float res = MAGMA_S_ZERO;// used in scan the row float res_ = MAGMA_S_ZERO;// used in scan the column float res1 = MAGMA_S_ZERO;// tem for res float res2 = MAGMA_S_ZERO;// tem for res_ __shared__ float la [ssymv_bs][bank_shift]; __shared__ float buff [ssymv_bs]; __shared__ float buff2 [ssymv_bs]; magma_int_t break_d = ssymv_bs * blkc; x += (break_d + tx ) * incx; A += break_d ; A += ty * lda + tx ; if( ty == 0 ) { buff[tx] = x[0]; if(blkc == 0 && tx < kstan) { MAGMA_S_SET2REAL(buff[tx], 0.0); } } // obtain the vector x store in buff; if ( (blkc % num_gpus) == my_gpu_id) { A += lda * (blkc/num_gpus) * ssymv_bs; // change #pragma unroll for(magma_int_t j =0; j<ssymv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; __syncthreads(); #pragma unroll for(magma_int_t i=ty*4; i<(ty * 4 + 4) ; i++){ if ( i > tx ) { la[0][bank_shift * tx + i] = (la[0][ i * bank_shift + tx]) ; } } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) 
res += ( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; __syncthreads(); A -= lda * (blkc/num_gpus) * ssymv_bs; } __syncthreads(); x -= (break_d + tx ) * incx;// return to the beginning x += (my_gpu_id ) * ssymv_bs ;// magma_int_t wc_c = my_gpu_id ; magma_int_t total_blocks_gpu = gridDim.x /num_gpus; if( my_gpu_id < ( gridDim.x % num_gpus) ) { total_blocks_gpu += 1; } magma_int_t shift = (blkc +1) /num_gpus ; if( my_gpu_id < ( (blkc+1) % num_gpus) ) { shift += 1; } #pragma unroll for(magma_int_t s=0; s<shift; s++) { x += num_gpus * ssymv_bs; A += lda * ssymv_bs ; wc_c += num_gpus; } WC += break_d + tx; magma_int_t num_blocks_iters = total_blocks_gpu - shift; magma_int_t count = 0; for(magma_int_t s=0; s<num_blocks_iters; s++) { MAGMA_S_SET2REAL(res_,0); count++; #pragma unroll for(magma_int_t j =0; j<ssymv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if( ty == 0 ) { buff2[tx] = x[tx]; } // obtain the vector x store in buff; __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += ( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; //iterate colum } __syncthreads(); la[0][bank_shift*tx+ty]= res_ ; __syncthreads(); if( ty== 0 ) { res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[wc_c*lda ] = res2; } __syncthreads(); wc_c += num_gpus; x += num_gpus * ssymv_bs; A += lda * ssymv_bs ; } la[0][bank_shift*tx+ty]= res ; __syncthreads(); if( ty== 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } } __global__ void magmablas_ssymv_200_U_generic_mgpu_offset_32(magma_int_t n, float alpha, float *A, magma_int_t lda, float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC, magma_int_t m_mod_thread_x, magma_int_t my_gpu_id, magma_int_t num_gpus, magma_int_t nb, magma_int_t kstan, magma_int_t the_right_gpu) { magma_int_t tx = threadIdx.x ; magma_int_t ty = threadIdx.y ; magma_int_t blkc = blockIdx.x ; float res = MAGMA_S_ZERO; float res_ = MAGMA_S_ZERO; float res1 = MAGMA_S_ZERO; float res2 = MAGMA_S_ZERO; __shared__ float la [ssymv_bs][bank_shift]; __shared__ float buff [ssymv_bs]; __shared__ float buff2 [ssymv_bs]; magma_int_t break_d = ssymv_bs * blkc; x += (break_d + tx ) * incx; A += break_d ; A += lda * ty; magma_int_t trackA ; if( blkc == ( gridDim.x - 1 )) { if( ty == 0 ){ if( tx > m_mod_thread_x ) { MAGMA_S_SET2REAL(buff[tx],0); } else buff[tx] = x[0]; } if ( tx > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx; A += trackA ; } else { if( ty == 0 ) { buff[tx] = x[0]; } A += tx ; } if(ty == 0 ) { if(blkc ==0 && tx < kstan)// { MAGMA_S_SET2REAL(buff[tx], 0.0); } } if ( (blkc % num_gpus) == my_gpu_id) { A += lda * (blkc/num_gpus) * ssymv_bs; // change if( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(magma_int_t j =0; j<ssymv_bs; j+=8){ if( ( ty + j ) > m_mod_thread_x ) { MAGMA_S_SET2REAL(la[0][bank_shift*(ty+j)+tx], 9999); } else la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } else { #pragma unroll for(magma_int_t j =0; j<ssymv_bs; j+=8){ la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(magma_int_t i=ty*4; i<(ty*4+4) ; i++){ if ( i > tx ) { la[0][bank_shift * tx + i] = (la[0][ i * bank_shift 
+ tx]) ; } } __syncthreads(); #pragma unroll for(magma_int_t j=0; j < 4 ; j++) res += (la[0][bank_shift*tx+j+ty*4])* buff[j+ty*4]; __syncthreads(); A -= lda * (blkc/num_gpus) * ssymv_bs; } x -= (break_d + tx ) * incx;// return to the beginning x += (my_gpu_id ) * ssymv_bs ;// magma_int_t wc_c = my_gpu_id ; magma_int_t total_blocks_gpu = gridDim.x /num_gpus; if( my_gpu_id < ( gridDim.x % num_gpus) ) { total_blocks_gpu += 1; } magma_int_t shift = (blkc +1) /num_gpus ; if( my_gpu_id < ( (blkc+1) % num_gpus) ) { shift += 1; } #pragma unroll for(magma_int_t s=0; s<shift; s++) { x += num_gpus * ssymv_bs; A += lda * ssymv_bs ; wc_c += num_gpus; } WC += break_d + tx; magma_int_t num_blocks_iters = total_blocks_gpu - shift; magma_int_t count = 0; for(magma_int_t s=0; s<num_blocks_iters; s++) { MAGMA_S_SET2REAL(res_,0); count++; if(my_gpu_id == the_right_gpu && s==num_blocks_iters-1) { if( ty == 0 ) { if( tx > m_mod_thread_x ) { MAGMA_S_SET2REAL(buff2[tx],0); } else buff2[tx] = x[tx]; } #pragma unroll for(magma_int_t j =0; j<ssymv_bs; j+=8) { if( ( ty + j ) > m_mod_thread_x ) { MAGMA_S_SET2REAL(la[0][bank_shift*(ty+j)+tx], 0); } else la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } __syncthreads(); }// end of the_right_gpu else { #pragma unroll for(magma_int_t j =0; j<ssymv_bs; j +=8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if( ty == 0 ) { buff2[tx] = x[tx]; } // obtain the vector x store in buff; __syncthreads(); } #pragma unroll for(magma_int_t j=0; j < 4 ; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += ( la[0][bank_shift * tx + j + ty * 4] ) * buff[j + ty * 4]; //iterate colum } __syncthreads(); la[0][bank_shift*tx+ty]= res_ ; __syncthreads(); if( ty== 0 ) { res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[wc_c*lda ] = res2; } __syncthreads(); wc_c += num_gpus; x += num_gpus * ssymv_bs; A += lda * ssymv_bs ; } la[0][bank_shift*tx+ty]= res ; __syncthreads(); if( ty== 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } } __global__ void magmablas_ssymv_200_U_update_mgpu_offset_32(magma_int_t n, float alpha, float* A, magma_int_t lda, float *x, magma_int_t incx, float beta, float *y, magma_int_t incy, float *WC, magma_int_t my_gpu_id, magma_int_t num_gpus, magma_int_t nb, magma_int_t kstan ) { magma_int_t i; magma_int_t tx = threadIdx.x ; magma_int_t ind = blockIdx.x * ssymv_bs + tx ; float Ca; MAGMA_S_SET2REAL(Ca, 0) ; WC+= blockIdx.x * lda + tx; for(i = 0; i<(blockIdx.x+1)*ssymv_bs; i+= ssymv_bs) { Ca += WC[0] ; WC += ssymv_bs ; } if( ind < n && ind >= kstan) y[ind * incy] = beta * y[ind * incy] + alpha * Ca ; } extern "C" void magmablas_ssymv_200_U_mgpu_offset_32(magma_int_t m, float alpha, float *A, magma_int_t lda, float *X, magma_int_t incx, float beta, float *Y, magma_int_t incy, float *dC_work, magma_int_t my_gpu_id, magma_int_t num_gpus, magma_int_t nb, magma_int_t offset, magma_int_t num_blocks_skipped, magma_int_t the_right_gpu) { magma_int_t the_chosen_block_id = offset / nb; magma_int_t kstan = offset % nb; A += lda * num_blocks_skipped * nb + the_chosen_block_id * nb; X += the_chosen_block_id * nb; Y += the_chosen_block_id * nb; magma_int_t blocks; if (m % ssymv_bs==0) blocks = m / ssymv_bs; else blocks = m / ssymv_bs + 
1; blocks -= the_chosen_block_id; dim3 grid(blocks, 1, 1); dim3 threads(nb, 8, 1); dim3 threads_u(nb, 1, 1); /* * If matrix size is multiple of ssymv_bs, we use a specific code. * otherwise, we call the generic case. */ if(m % ssymv_bs == 0 ) { magmablas_ssymv_200_U_special_mgpu_offset_32 <<< grid, threads, 0, magma_stream >>>( m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, my_gpu_id, num_gpus, nb, kstan); } else{ magma_int_t m_mod_thread_x = m%ssymv_bs - 1; magmablas_ssymv_200_U_generic_mgpu_offset_32 <<< grid, threads, 0, magma_stream >>> ( m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x, my_gpu_id, num_gpus, nb, kstan, the_right_gpu); } magmablas_ssymv_200_U_update_mgpu_offset_32<<< grid, threads_u, 0, magma_stream >>>( m, alpha, A, lda, X, incx, beta, Y, incy, dC_work, my_gpu_id, num_gpus, nb, kstan); } /************************************************************************* Purpose ======= magmablas_ssymv performs the matrix-vector operation on fermi: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n hermitian matrix. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - COMPLEX*16 . On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - COMPLEX*16 array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the hermitian matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the hermitian matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. It is recommended that lda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. X - COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA - COMPLEX*16 . On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Y - COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. INCY - INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. 
*/ extern "C" magma_int_t magmablas_ssymv_mgpu_32_offset( char uplo, magma_int_t n, float alpha, float **A, magma_int_t lda, float **X, magma_int_t incx, float beta, float **Y, magma_int_t incy, float **work, magma_int_t lwork, magma_int_t num_gpus, magma_int_t nb, magma_int_t offset, cudaStream_t stream[][10]) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) ) return MAGMA_SUCCESS; magma_int_t blocks = n / ssymv_bs + (n % ssymv_bs != 0); magma_int_t workspace = lda * (blocks + 1); if (lwork < workspace){ printf("Not enough work space in magmablas_ssymv: passed %d, required %d\n", lwork, workspace); exit(1); } if(nb != 32) { printf("Error in magmablas_ssymv_200_mgpu: nb != 32, program will exit! please reallocate your matrix among GPUs\n"); exit(0); } magma_int_t i = 0; for(i=0; i<num_gpus; i++) { magma_setdevice(i); magmablasSetKernelStream(stream[i][0]); magma_int_t the_chosen_block_id = offset / nb; magma_int_t the_chosen_gpu_id = the_chosen_block_id % num_gpus; magma_int_t num_blocks_skipped = the_chosen_block_id / num_gpus; if(i < the_chosen_gpu_id) { num_blocks_skipped += 1; } int new_gpu_id = ( i + num_gpus - the_chosen_gpu_id ) % num_gpus; magma_int_t the_right_block_id = n / nb ; magma_int_t the_right_gpu = the_right_block_id % num_gpus; the_right_gpu = ( the_right_gpu + num_gpus - the_chosen_gpu_id ) % num_gpus; // the_right_gpu is used in Upper generic case. if ( upper) { magmablas_ssymv_200_U_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i], new_gpu_id, num_gpus, nb, offset, num_blocks_skipped, the_right_gpu); } else { magmablas_ssymv_200_L_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i], new_gpu_id, num_gpus, nb, offset, num_blocks_skipped); } } return MAGMA_SUCCESS; } extern "C" magma_int_t magmablas_ssymv2_mgpu_32_offset( char uplo, magma_int_t n, float alpha, float **A, magma_int_t lda, float **X, magma_int_t incx, float beta, float **Y, magma_int_t incy, float **work, magma_int_t lwork, magma_int_t num_gpus, magma_int_t nb, magma_int_t offset) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) ) return MAGMA_SUCCESS; magma_int_t blocks = n / ssymv_bs + (n % ssymv_bs != 0); magma_int_t workspace = lda * (blocks + 1); if (lwork < workspace){ printf("Not enough work space in magmablas_ssymv: passed %d, required %d\n", lwork, workspace); exit(1); } if(nb != 32) { printf("Error in magmablas_ssymv_200_mgpu: nb != 32, program will exit! 
please reallocate your matrix among GPUs\n"); exit(0); } magma_int_t i = 0; for(i=0; i<num_gpus; i++) { magma_setdevice(i); // magmablasSetKernelStream(stream[i][0]); magma_int_t the_chosen_block_id = offset / nb; magma_int_t the_chosen_gpu_id = the_chosen_block_id % num_gpus; magma_int_t num_blocks_skipped = the_chosen_block_id / num_gpus; if(i < the_chosen_gpu_id) { num_blocks_skipped += 1; } int new_gpu_id = ( i + num_gpus - the_chosen_gpu_id ) % num_gpus; magma_int_t the_right_block_id = n / nb ; magma_int_t the_right_gpu = the_right_block_id % num_gpus; the_right_gpu = ( the_right_gpu + num_gpus - the_chosen_gpu_id ) % num_gpus; // the_right_gpu is used in Upper generic case. if ( upper) { magmablas_ssymv_200_U_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i], new_gpu_id, num_gpus, nb, offset, num_blocks_skipped, the_right_gpu); } else magmablas_ssymv_200_L_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i], new_gpu_id, num_gpus, nb, offset, num_blocks_skipped); } return MAGMA_SUCCESS; } extern "C" magma_int_t magmablas_ssymv2_mgpu_32( char uplo, magma_int_t n, float alpha, float **A, magma_int_t lda, float **X, magma_int_t incx, float beta, float **Y, magma_int_t incy, float **work, magma_int_t lwork, magma_int_t num_gpus, magma_int_t nb) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_S_EQUAL(alpha, MAGMA_S_ZERO) && MAGMA_S_EQUAL(beta, MAGMA_S_ONE) ) ) return MAGMA_SUCCESS; magma_int_t blocks = n / ssymv_bs + (n % ssymv_bs != 0); magma_int_t workspace = lda * (blocks + 1); if (lwork < workspace){ printf("Not enough work space in magmablas_ssymv: passed %d, required %d\n", lwork, workspace); exit(1); } if(nb != 32) { printf("Error in magmablas_ssymv_200_mgpu: nb != 32, program will exit! please reallocate your matrix among GPUs\n"); exit(0); } magma_int_t i = 0; for(i=0; i<num_gpus; i++) { magma_setdevice(i); magma_int_t the_right_block_id = n / nb ; magma_int_t the_right_gpu = the_right_block_id % num_gpus; // the_right_gpu is used in Upper generic case. if ( upper) { magmablas_ssymv_200_U_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i], i, num_gpus, nb, 0, 0, the_right_gpu); } else magmablas_ssymv_200_L_mgpu_offset_32(n, alpha, A[i], lda, X[i], incx, beta, Y[i], incy, work[i], i, num_gpus, nb, 0, 0); } return MAGMA_SUCCESS; } __global__ void kernel_fillZero(float *A, magma_int_t size) { magma_int_t id = blockIdx.x * blockDim.x + threadIdx.x; if(id < size) { MAGMA_S_SET2REAL(A[id], 0.0); } } void fillZero(float *A, magma_int_t size) { magma_int_t blocks = (size-1)/512 + 1; dim3 grid(blocks, 1, 1); dim3 threads(512, 1, 1); kernel_fillZero<<<grid, threads>>>(A, size); } #endif /* (GPUSHMEM >= 200) */
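/*
 * A minimal host-side sketch (not part of the original file) of how the
 * multi-GPU driver above might be called. It assumes the symmetric matrix,
 * the vectors and the workspaces have already been distributed over the GPUs
 * in MAGMA's 1-D block-cyclic column layout with block size nb = 32 (the
 * drivers above exit if nb != 32), with dA[g], dX[g], dY[g] device pointers
 * on GPU g. The only sizing shown is the one the drivers actually check:
 * lwork >= lda * (blocks + 1). common_magma.h is assumed to be included.
 */
static void example_ssymv_mgpu(magma_int_t n, magma_int_t num_gpus,
                               float *dA[], float *dX[], float *dY[],
                               magma_int_t lda)
{
    const magma_int_t nb = 32;                    // required by the nb != 32 check above
    magma_int_t blocks = n / nb + (n % nb != 0);  // number of 32-wide column blocks
    magma_int_t lwork  = lda * (blocks + 1);      // per-GPU workspace, as checked above

    float *dwork[8];                              // one workspace per GPU (num_gpus <= 8 assumed)
    for (magma_int_t g = 0; g < num_gpus; ++g) {
        magma_setdevice(g);
        cudaMalloc((void **)&dwork[g], sizeof(float) * lwork);
    }

    float alpha = 1.0f, beta = 0.0f;
    // y := alpha*A*x + beta*y using the lower triangle, no offset into the matrix
    magmablas_ssymv2_mgpu_32('L', n, alpha, dA, lda, dX, 1, beta, dY, 1,
                             dwork, lwork, num_gpus, nb);

    for (magma_int_t g = 0; g < num_gpus; ++g) {
        magma_setdevice(g);
        cudaDeviceSynchronize();
        cudaFree(dwork[g]);
    }
}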
07ed3490f958b60b54f101323378472ec5fd8ea3.hip
// !!! This is a file automatically generated by hipify!!! /* * Inverse Discrete Cosine Transform in Column wise (DCT four) * DCT_IV_Column_Inverse * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * gpuArray output, B=DCT_IV_Column_Inverse(A)=mexFunction(A). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "DCT_IV_Column_Inverse.cuh" #include "mex.h" #include "gpu/mxGPUArray.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "ERRORCHK.h" // #define TILE_DIM 16 #define DEFAULT_DIM 32 // Tile dimension #define DELTA(i, j) ((i==j)?1:0) //const float PI_d = 3.141592653589793238462643383279502884f; //pi template <unsigned int TILE_DIM > __global__ void DCTIV_Column_Inverse_Kernelx(float *A, float *C, int numARows, int numAColumns, int numCRows, int numCColumns) { float CValue = 0.0f; const float PI_d = 3.141592653589793238462643383279502884f; //pi int Row = blockIdx.y*TILE_DIM + threadIdx.y; int Col = blockIdx.x*TILE_DIM + threadIdx.x; __shared__ float As[TILE_DIM][TILE_DIM]; __shared__ float Bs[TILE_DIM][TILE_DIM]; for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) { if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) { As[threadIdx.y][threadIdx.x] = __cosf(((2 * (threadIdx.x + k*TILE_DIM) + 1)*PI_d*(2 * Row + 1) / (4.0 * numARows)))*sqrtf(2.0 / numARows);} else { As[threadIdx.y][threadIdx.x] = 0.0; } if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns){ Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col]; } else { Bs[threadIdx.y][threadIdx.x] = 0.0; } __syncthreads(); for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; } __syncthreads(); } if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE extern "C" void CalculateTransformDCTInverseColumnFourS(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns) { float * hostA = A; // The A matrix //float * hostB = B; // The B matrix float * hostC = C; // The output C matrix //float * hostComputedC; float * deviceA; //float * deviceB; float * deviceC; //hostA = (float *)malloc(sizeof(float)*numARows*numAColumns); hipError_t error; int devID = 0; // get number of SMs on this GPU error = hipGetDevice(&devID); hipDeviceProp_t deviceProp; error = hipGetDeviceProperties(&deviceProp, devID); if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 
16 : 32; // Setting numCRows and numCColumns numCRows = numARows; numCColumns = numAColumns; //hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns); //hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns); // Allocating GPU memory gpuErrchk(hipMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns)); //hipMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns); gpuErrchk(hipMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns)); //thrust::device_ptr< float >dev_ptr_A(deviceA); //thrust::device_ptr< float >dev_ptr_C(deviceC); // Copy memory to the GPU gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, hipMemcpyHostToDevice)); //hipMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, hipMemcpyHostToDevice); ///////////////////////////////////////////////////////// unsigned int TILE_DIM=16; dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DCTIV_Column_Inverse_Kernelx <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns); //matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); // Copy the results in GPU memory back to the CPU gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost)); C = hostC; //thrust::device_free(dev_ptr_A); //thrust::device_free(dev_ptr_C); gpuErrchk(hipFree(deviceA)); //hipFree(deviceB); gpuErrchk(hipFree(deviceC)); return; case 32: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DCTIV_Column_Inverse_Kernelx <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns); //matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); // Copy the results in GPU memory back to the CPU gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost)); C = hostC; //thrust::device_free(dev_ptr_A); //thrust::device_free(dev_ptr_C); gpuErrchk(hipFree(deviceA)); //hipFree(deviceB); gpuErrchk(hipFree(deviceC)); return; } }
07ed3490f958b60b54f101323378472ec5fd8ea3.cu
/* * Inverse Discrete Cosine Transform in Column wise (DCT four) * DCT_IV_Column_Inverse * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * gpuArray output, B=DCT_IV_Column_Inverse(A)=mexFunction(A). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "DCT_IV_Column_Inverse.cuh" #include "mex.h" #include "gpu/mxGPUArray.h" #include <cuda.h> #include <cuda_runtime.h> #include "ERRORCHK.h" // #define TILE_DIM 16 #define DEFAULT_DIM 32 // Tile dimension #define DELTA(i, j) ((i==j)?1:0) //const float PI_d = 3.141592653589793238462643383279502884f; //pi template <unsigned int TILE_DIM > __global__ void DCTIV_Column_Inverse_Kernelx(float *A, float *C, int numARows, int numAColumns, int numCRows, int numCColumns) { float CValue = 0.0f; const float PI_d = 3.141592653589793238462643383279502884f; //pi int Row = blockIdx.y*TILE_DIM + threadIdx.y; int Col = blockIdx.x*TILE_DIM + threadIdx.x; __shared__ float As[TILE_DIM][TILE_DIM]; __shared__ float Bs[TILE_DIM][TILE_DIM]; for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) { if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) { As[threadIdx.y][threadIdx.x] = __cosf(((2 * (threadIdx.x + k*TILE_DIM) + 1)*PI_d*(2 * Row + 1) / (4.0 * numARows)))*sqrtf(2.0 / numARows);} else { As[threadIdx.y][threadIdx.x] = 0.0; } if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns){ Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col]; } else { Bs[threadIdx.y][threadIdx.x] = 0.0; } __syncthreads(); for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; } __syncthreads(); } if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE extern "C" void CalculateTransformDCTInverseColumnFourS(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns) { float * hostA = A; // The A matrix //float * hostB = B; // The B matrix float * hostC = C; // The output C matrix //float * hostComputedC; float * deviceA; //float * deviceB; float * deviceC; //hostA = (float *)malloc(sizeof(float)*numARows*numAColumns); cudaError_t error; int devID = 0; // get number of SMs on this GPU error = cudaGetDevice(&devID); cudaDeviceProp deviceProp; error = cudaGetDeviceProperties(&deviceProp, devID); if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } int TILEDIM = (deviceProp.major < 2) ? 
16 : 32; // Setting numCRows and numCColumns numCRows = numARows; numCColumns = numAColumns; //hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns); //hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns); // Allocating GPU memory gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns)); //cudaMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns); gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns)); //thrust::device_ptr< float >dev_ptr_A(deviceA); //thrust::device_ptr< float >dev_ptr_C(deviceC); // Copy memory to the GPU gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice)); //cudaMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice); ///////////////////////////////////////////////////////// unsigned int TILE_DIM=16; dim3 dimBlock; dim3 dimGrid; switch (TILEDIM){ case 16: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DCTIV_Column_Inverse_Kernelx <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns); //matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); // Copy the results in GPU memory back to the CPU gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost)); C = hostC; //thrust::device_free(dev_ptr_A); //thrust::device_free(dev_ptr_C); gpuErrchk(cudaFree(deviceA)); //cudaFree(deviceB); gpuErrchk(cudaFree(deviceC)); return; case 32: TILE_DIM= TILEDIM; dimBlock.x=TILE_DIM; dimBlock.y=TILE_DIM; dimBlock.z=1; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DCTIV_Column_Inverse_Kernelx <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns); //matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); // Copy the results in GPU memory back to the CPU gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost)); C = hostC; //thrust::device_free(dev_ptr_A); //thrust::device_free(dev_ptr_C); gpuErrchk(cudaFree(deviceA)); //cudaFree(deviceB); gpuErrchk(cudaFree(deviceC)); return; } }
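/*
 * A small host-side sketch (not part of the original file) showing how the
 * wrapper above could be driven without the MEX layer. The wrapper itself
 * copies A to the GPU, applies the inverse DCT-IV along each column, and
 * copies the result back into C; C has the same dimensions as A. The matrix
 * size below is an arbitrary example.
 */
#include <stdlib.h>

static void example_idct4_columns(void)
{
    const int rows = 64, cols = 32;   // arbitrary test size
    float *A = (float *)malloc(sizeof(float) * rows * cols);
    float *C = (float *)malloc(sizeof(float) * rows * cols);

    for (int i = 0; i < rows * cols; ++i)
        A[i] = (float)(i % 7);        // dummy input data

    // numCRows/numCColumns are recomputed inside the wrapper from the A dimensions
    CalculateTransformDCTInverseColumnFourS(A, C, rows, cols, rows, cols);

    free(A);
    free(C);
}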
5011713d7e90b82f4ce529f047dfe73aa860314b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// N (number of input points) and npts (pixels in the 640x480 target frame) are
// expected to be defined by the including code; the fallbacks below are placeholders.
#ifndef N
#define N (640 * 480)
#endif
#ifndef npts
#define npts (640 * 480)
#endif

// Dot product of two length-3 arrays (the original macro used a(0) call syntax, which does not compile for raw arrays).
#define dotPro(a, b) ((a)[0]*(b)[0] + (a)[1]*(b)[1] + (a)[2]*(b)[2])

__global__ void RunICP(
    int   validPoints[N],
    float verProjD[N][3],      // source (depth) vertices
    float verNormD[N][3],      // source normals
    float matRgPre[3][3],      // previous global rotation; declared float[3] in the original, a row-major 3x3 matrix is assumed here
    float matTransltgPre[3],   // previous global translation
    const int focus,           // focal length in pixels
    float verProjS[N][3],      // target vertices ("verProS" in the original signature was a typo for the name used in the body)
    float verNormS[N][3],      // target normals
    const float epsD, const float epsN, const float epsDs,
    float Energy[N],
    float Ak[N][6],            // per-point row of the point-to-plane linear system
    float Bk[N],               // per-point right-hand side (declared 1D, so indexed as Bk[i])
    int   flag[N])
{
    int gidx = blockIdx.x * blockDim.x + threadIdx.x;
    int gidy = blockIdx.y * blockDim.y + threadIdx.y;
    int i = gidx + (gidy - 1) * gridDim.y;   // linear point index, kept as in the original

    float vertexProj[3];
    float normalProj[3];
    float disVdiff[3];
    float disNdiff[3];

    // Bounds are checked before reading validPoints; rejected points get Energy = 0 so the host-side reduction is well defined.
    if (i < 0 || i >= N || !validPoints[i]) {
        if (i >= 0 && i < N) { Energy[i] = 0; flag[i] = 0; }
        return;
    }

    // Transform the source vertex and normal by the previous pose estimate.
    for (int k = 0; k < 3; k++) {
        vertexProj[k] = dotPro(verProjD[i], matRgPre[k]) + matTransltgPre[k];
        normalProj[k] = dotPro(verNormD[i], matRgPre[k]);
    }

    // Project into the 640x480 target image to find the corresponding pixel.
    int x = (int)(vertexProj[0] / vertexProj[2] * focus) + 320;
    int y = (int)(vertexProj[1] / vertexProj[2] * focus) + 240;
    int index = y * 640 + x;
    if (index < 0 || index >= npts) {
        Energy[i] = 0;
        flag[i]   = 0;
        return;
    }

    for (int k = 0; k < 3; k++) {
        disVdiff[k] = verProjS[index][k] - vertexProj[k];
        disNdiff[k] = verNormS[index][k] - normalProj[k];
    }
    float disV = dotPro(disVdiff, disVdiff);
    float disN = dotPro(disNdiff, disNdiff);

    // Accept the correspondence only if the vertex and normal distances pass the thresholds.
    if (disV < epsD && disN >= epsN && disV > epsDs) {
        float error = dotPro(disVdiff, verNormS[index]);   // point-to-plane residual
        Energy[i] = error * error / 10;                    // remember to reduce (sum) the energy on the host

        // Ak[i] = [ p x n_s , n_s ], Bk[i] = n_s . (p_s - p): standard point-to-plane ICP linearization.
        Ak[i][0] = vertexProj[1]*verNormS[index][2] - vertexProj[2]*verNormS[index][1];
        Ak[i][1] = vertexProj[2]*verNormS[index][0] - vertexProj[0]*verNormS[index][2];
        Ak[i][2] = vertexProj[0]*verNormS[index][1] - vertexProj[1]*verNormS[index][0];
        for (int k = 0; k < 3; k++) {
            Ak[i][k + 3] = verNormS[index][k];
        }
        Bk[i] = dotPro(verNormS[index], disVdiff);
        flag[i] = 1;
    } else {
        Energy[i] = 0;
        flag[i]   = 0;
    }
}
5011713d7e90b82f4ce529f047dfe73aa860314b.cu
// N (number of input points) and npts (pixels in the 640x480 target frame) are
// expected to be defined by the including code; the fallbacks below are placeholders.
#ifndef N
#define N (640 * 480)
#endif
#ifndef npts
#define npts (640 * 480)
#endif

// Dot product of two length-3 arrays (the original macro used a(0) call syntax, which does not compile for raw arrays).
#define dotPro(a, b) ((a)[0]*(b)[0] + (a)[1]*(b)[1] + (a)[2]*(b)[2])

__global__ void RunICP(
    int   validPoints[N],
    float verProjD[N][3],      // source (depth) vertices
    float verNormD[N][3],      // source normals
    float matRgPre[3][3],      // previous global rotation; declared float[3] in the original, a row-major 3x3 matrix is assumed here
    float matTransltgPre[3],   // previous global translation
    const int focus,           // focal length in pixels
    float verProjS[N][3],      // target vertices ("verProS" in the original signature was a typo for the name used in the body)
    float verNormS[N][3],      // target normals
    const float epsD, const float epsN, const float epsDs,
    float Energy[N],
    float Ak[N][6],            // per-point row of the point-to-plane linear system
    float Bk[N],               // per-point right-hand side (declared 1D, so indexed as Bk[i])
    int   flag[N])
{
    int gidx = blockIdx.x * blockDim.x + threadIdx.x;
    int gidy = blockIdx.y * blockDim.y + threadIdx.y;
    int i = gidx + (gidy - 1) * gridDim.y;   // linear point index, kept as in the original

    float vertexProj[3];
    float normalProj[3];
    float disVdiff[3];
    float disNdiff[3];

    // Bounds are checked before reading validPoints; rejected points get Energy = 0 so the host-side reduction is well defined.
    if (i < 0 || i >= N || !validPoints[i]) {
        if (i >= 0 && i < N) { Energy[i] = 0; flag[i] = 0; }
        return;
    }

    // Transform the source vertex and normal by the previous pose estimate.
    for (int k = 0; k < 3; k++) {
        vertexProj[k] = dotPro(verProjD[i], matRgPre[k]) + matTransltgPre[k];
        normalProj[k] = dotPro(verNormD[i], matRgPre[k]);
    }

    // Project into the 640x480 target image to find the corresponding pixel.
    int x = (int)(vertexProj[0] / vertexProj[2] * focus) + 320;
    int y = (int)(vertexProj[1] / vertexProj[2] * focus) + 240;
    int index = y * 640 + x;
    if (index < 0 || index >= npts) {
        Energy[i] = 0;
        flag[i]   = 0;
        return;
    }

    for (int k = 0; k < 3; k++) {
        disVdiff[k] = verProjS[index][k] - vertexProj[k];
        disNdiff[k] = verNormS[index][k] - normalProj[k];
    }
    float disV = dotPro(disVdiff, disVdiff);
    float disN = dotPro(disNdiff, disNdiff);

    // Accept the correspondence only if the vertex and normal distances pass the thresholds.
    if (disV < epsD && disN >= epsN && disV > epsDs) {
        float error = dotPro(disVdiff, verNormS[index]);   // point-to-plane residual
        Energy[i] = error * error / 10;                    // remember to reduce (sum) the energy on the host

        // Ak[i] = [ p x n_s , n_s ], Bk[i] = n_s . (p_s - p): standard point-to-plane ICP linearization.
        Ak[i][0] = vertexProj[1]*verNormS[index][2] - vertexProj[2]*verNormS[index][1];
        Ak[i][1] = vertexProj[2]*verNormS[index][0] - vertexProj[0]*verNormS[index][2];
        Ak[i][2] = vertexProj[0]*verNormS[index][1] - vertexProj[1]*verNormS[index][0];
        for (int k = 0; k < 3; k++) {
            Ak[i][k + 3] = verNormS[index][k];
        }
        Bk[i] = dotPro(verNormS[index], disVdiff);
        flag[i] = 1;
    } else {
        Energy[i] = 0;
        flag[i]   = 0;
    }
}
91ecb93c0d1cc35d152e5ed7e89eb5242f31e9de.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************* c* Multimodal Deformable Image Registration * c* via Mutual Information or Bhattacharyya Distantce * c* Version: 1.0 * c* Language: C, CUDA * c* * c* Developer: Yifei Lou * c* Email: [email protected] * c* * c* School of Electrical and Computer Engineering * c* Georgia Institute of Technology * c* Atlanta, GA, 30318 * c* Website: http://groups.bme.gatech.edu/groups/bil/ * c* * c* Copyright (c) 2011 * c* All rights reserved. * c* * c* Permission to use, copy, or modify this code and its * c* documentation for scientific purpose is hereby granted * c* without fee, provided that this copyright notice appear in * c* all copies and that both that copyright notice and this * c* permission notice appear in supporting documentation. The use * c* for commercial purposes is prohibited without permission. * c* * c* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND * c* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * c* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * c* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * c* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * c* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * c* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT * c* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF* c* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * c* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * c* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * c* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * c* THE POSSIBILITY OF SUCH DAMAGE. 
* c* * c******************************************************************/ /******************************************************************* c* Short discription * c* main function to register two images on the current scale * c* including upsample and downsample * c******************************************************************/ #include <thrust/binary_search.h> #include <thrust/device_vector.h> #include <thrust/functional.h> #include <thrust/sort.h> #include <rocblas.h> #include <cutil.h> #include <cutil_inline.h> #include "viscous_convolution.h" #include "viscous_global.h" // hash a point in the unit square to the index of // the grid bucket that contains it struct point_to_bucket_index : public thrust::unary_function<float2,unsigned int> { __host__ __device__ point_to_bucket_index(unsigned int width, unsigned int height) :w(width),h(height){} __host__ __device__ unsigned int operator()(float2 p) const { // find the raster indices of p's bucket unsigned int x = static_cast<unsigned int>(p.x * (w-1)); unsigned int y = static_cast<unsigned int>(p.y * (h-1)); // return the bucket's linear index return y * w + x; } unsigned int w, h; }; __global__ void downSample(float *src, float *dest, int NX, int NY, int NZ, int s) { const int tid = (blockIdx.y*NBLOCKX + blockIdx.x)*blockDim.x + threadIdx.x; if(tid < NX*NY*NZ) { int z = tid/(NX*NY); int y = (tid%(NX*NY))/NX; int x = tid%NX; float sum =0.0f; for(int xs = 0; xs<s; xs++) for(int ys =0; ys<s; ys++) sum += src[s*x+xs + (s*y+ys)*NX0 + s*z*NX0*NY0]; dest[tid] = sum/s/s; } } __global__ void upSample(float *src, float *dest, int NX, int NY, int NZ) // upsampling { const int tid = (blockIdx.y*NBLOCKX + blockIdx.x)*blockDim.x + threadIdx.x; if(tid < NX*NY*NZ) { int z = tid/(NX*NY); int y = (tid%(NX*NY))/NX; int x = tid%NX; int xmin = x/2 - (x%2 == 0); int xmax = x/2 + (x%2 == 1); int ymin = y/2 - (y%2 == 0); int ymax = y/2 + (y%2 == 1); int zmin = z/2 - (z%2 == 0); int zmax = z/2 + (z%2 == 1); xmin = (xmin < 0) ? 0: xmin; ymin = (ymin < 0) ? 0: ymin; zmin = (zmin < 0) ? 0: zmin; xmax = (xmax < NX)? xmax : NX-1; ymax = (ymax < NY)? ymax : NY-1; zmax = (zmax < NZ)? 
zmax : NZ-1; float wx = 0.25 + 0.5*(x%2==0); float wy = 0.25 + 0.5*(y%2==0); float wz = 0.25 + 0.5*(z%2==0); dest[tid] = src[xmin + ymin*NX/2 + zmin*NX/2*NY/2] * (1.0 - wx) * (1.0-wy) * (1.0-wz) + src[xmax + ymin*NX/2 + zmin*NX/2*NY/2] * wx * (1.0-wy) * (1.0-wz) + src[xmin + ymax*NX/2 + zmin*NX/2*NY/2] * (1.0 - wx) * wy * (1.0-wz) + src[xmax + ymax*NX/2 + zmin*NX/2*NY/2] * wx * wy * (1.0-wz) + src[xmin + ymin*NX/2 + zmax*NX/2*NY/2] * (1.0 - wx) * (1.0-wy) * wz + src[xmax + ymin*NX/2 + zmax*NX/2*NY/2] * wx * (1.0-wy) * wz + src[xmin + ymax*NX/2 + zmax*NX/2*NY/2] * (1.0 - wx) * wy * wz + src[xmax + ymax*NX/2 + zmax*NX/2*NY/2] * wx * wy * wz; dest[tid] = 2*dest[tid]; } } void compute(float *d_im_move, float *d_im_static, float *d_mv_x, float *d_mv_y, float *d_mv_z, int maxIter) // d_mv_x, d_mv_y and d_im_move are updated { // bind moving image to texture const hipExtent volumeSize = make_hipExtent(NX, NY, NZ); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); cutilSafeCall( hipMalloc3DArray(&d_im_move_array, &channelDesc, volumeSize) ); hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr((void*)d_im_move, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams.dstArray = d_im_move_array; copyParams.extent = volumeSize; copyParams.kind = hipMemcpyDeviceToDevice; cutilSafeCall( hipMemcpy3D(&copyParams) ); d_im_move_tex.normalized = false; d_im_move_tex.filterMode = hipFilterModeLinear; cutilSafeCall(hipBindTextureToArray(d_im_move_tex, d_im_move_array, channelDesc)); // bind vector flows to texture cutilSafeCall( hipMalloc3DArray(&d_mv_x_array, &channelDesc, volumeSize) ); hipMemcpy3DParms copyParams_x = {0}; copyParams_x.srcPtr = make_hipPitchedPtr((void*)d_mv_x, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams_x.dstArray = d_mv_x_array; copyParams_x.extent = volumeSize; copyParams_x.kind = hipMemcpyDeviceToDevice; cutilSafeCall( hipMemcpy3D(&copyParams_x) ); d_mv_x_tex.normalized = false; d_mv_x_tex.filterMode = hipFilterModeLinear; cutilSafeCall( hipMalloc3DArray(&d_mv_y_array, &channelDesc, volumeSize) ); hipMemcpy3DParms copyParams_y = {0}; copyParams_y.srcPtr = make_hipPitchedPtr((void*)d_mv_y, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams_y.dstArray = d_mv_y_array; copyParams_y.extent = volumeSize; copyParams_y.kind = hipMemcpyDeviceToDevice; cutilSafeCall( hipMemcpy3D(&copyParams_y) ); d_mv_y_tex.normalized = false; d_mv_y_tex.filterMode = hipFilterModeLinear; cutilSafeCall( hipMalloc3DArray(&d_mv_z_array, &channelDesc, volumeSize) ); hipMemcpy3DParms copyParams_z = {0}; copyParams_z.srcPtr = make_hipPitchedPtr((void*)d_mv_z, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams_z.dstArray = d_mv_z_array; copyParams_z.extent = volumeSize; copyParams_z.kind = hipMemcpyDeviceToDevice; cutilSafeCall( hipMemcpy3D(&copyParams_z) ); d_mv_z_tex.normalized = false; d_mv_z_tex.filterMode = hipFilterModeLinear; float *d_im_out; cutilSafeCall( hipMalloc((void **)&d_im_out, sDATA_SIZE) ); // velocity float *d_v_x, *d_v_x_copy; float *d_v_y, *d_v_y_copy; float *d_v_z, *d_v_z_copy; cutilSafeCall( hipMalloc((void **)&d_v_x, sDATA_SIZE) ); cutilSafeCall( hipMalloc((void **)&d_v_y, sDATA_SIZE) ); cutilSafeCall( hipMalloc((void **)&d_v_z, sDATA_SIZE) ); cutilSafeCall( hipMalloc((void **)&d_v_x_copy, sDATA_SIZE) ); cutilSafeCall( hipMalloc((void **)&d_v_y_copy, sDATA_SIZE) ); cutilSafeCall( hipMalloc((void **)&d_v_z_copy, sDATA_SIZE) ); // setup for 
computing joint histogram via thrust // the grid data structure keeps a range per grid bucket: // each bucket_begin[i] indexes the first element of bucket i's list of points // each bucket_end[i] indexes one past the last element of bucket i's list of points thrust::device_vector<unsigned int> bucket_begin(nBin*nBin); thrust::device_vector<unsigned int> bucket_end(nBin*nBin); // allocate storage for each point's bucket index thrust::device_vector<unsigned int> bucket_indices(NX*NY*NZ); // allocate space to hold per-bucket sizes thrust::device_vector<unsigned int> bucket_sizes(nBin*nBin); // allocate float2 vector float2 *d_points; hipMalloc((void**) &d_points, sizeof(float2)*NX*NY*NZ); int regrid = 0; float MI[1000]; int3 Dims; Dims.x = NX; Dims.y = NY; Dims.z = NZ; for(int it=0; it<maxIter; it++) { // upate image hipLaunchKernelGGL(( ImageWarp), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_mv_x, d_mv_y, d_mv_z, d_im_out, NX, NY, NZ); // joint histogram via thrust ----- begin // convert to float2 vector hipLaunchKernelGGL(( transToFloat2), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_im_out, d_im_static, d_points, NX*NY*NZ); // use a thrust ptr to wrap the raw pointer thrust::device_ptr<float2> points_t(d_points); // transform the points to their bucket indices thrust::transform(points_t, points_t+NX*NY*NZ, bucket_indices.begin(), point_to_bucket_index(nBin,nBin)); // sort the bucket index thrust::sort(bucket_indices.begin(), bucket_indices.end()); // find the beginning of each bucket's list of points thrust::counting_iterator<unsigned int> search_begin(0); thrust::lower_bound(bucket_indices.begin(), bucket_indices.end(), search_begin, search_begin + nBin*nBin, bucket_begin.begin()); // find the end of each bucket's list of points thrust::upper_bound(bucket_indices.begin(), bucket_indices.end(), search_begin, search_begin + nBin*nBin, bucket_end.begin()); // take the difference between bounds to find each bucket size thrust::transform(bucket_end.begin(), bucket_end.end(), bucket_begin.begin(), bucket_sizes.begin(), thrust :: minus<unsigned int>()); // now hist contains the histogram unsigned int *hist = thrust::raw_pointer_cast(&bucket_sizes[0]); hipLaunchKernelGGL(( copyHist), dim3(nblocks_hist), dim3(NTHREAD_PER_BLOCK), 0, 0, hist, d_jointHistogram); // joint histogram via thrust ----- end // compute the convolution of joint histogram hipLaunchKernelGGL(( myconv2dGPU), dim3(nblocks_hist), dim3(NTHREAD_PER_BLOCK), 0, 0, d_jointHistogram, d_jointHistogram_conv, GaussKernelH, nBin, nBin, 3*hValue); // normalize joint histogram float sum = hipblasSasum (nBin*nBin, d_jointHistogram_conv , 1); hipblasSscal (nBin*nBin, 1.0f/sum, d_jointHistogram_conv, 1); // compute mutual info by GPU hipLaunchKernelGGL(( marginalDist), dim3(nBin), dim3(nBin), 0, 0, d_jointHistogram_conv, d_probx, d_proby); switch (METHOD) { case 1: hipLaunchKernelGGL(( marginalBnorm_sum), dim3(nblocks_hist), dim3(NTHREAD_PER_BLOCK), 0, 0, d_jointHistogram_conv, d_probx, d_proby, d_jointHistogram); hipLaunchKernelGGL(( marginalDistAlongY), dim3(nBin), dim3(nBin), 0, 0, d_jointHistogram, d_Bsum); hipLaunchKernelGGL(( BnormGPU), dim3(nblocks_hist), dim3(NTHREAD_PER_BLOCK), 0, 0, d_jointHistogram_conv, d_probx, d_proby,d_Bsum, d_jointHistogram); break; case 2: hipLaunchKernelGGL(( mutualInfoGPU), dim3(nblocks_hist), dim3(NTHREAD_PER_BLOCK), 0, 0, d_jointHistogram_conv, d_probx, d_proby, d_jointHistogram); break; } MI[it] = hipblasSasum (nBin*nBin, d_jointHistogram_conv, 1); printf("mutual information (%d)= %f\n", it, 
MI[it]); // NOTE: after this step, jointHistogram becomes the likelihood // compute the first derivative w.r.t. x-dim of joint histogram hipLaunchKernelGGL(( myconv2dGPU), dim3(nblocks_hist), dim3(NTHREAD_PER_BLOCK), 0, 0, d_jointHistogram, d_jointHistogram_conv, GaussKernelHx, nBin, nBin,3*hValue); // compute the force hipLaunchKernelGGL(( forceComp), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_im_out, d_im_static, d_jointHistogram_conv, d_v_x, d_v_y, d_v_z, NX, NY, NZ); ImageSmooth(d_v_x, d_v_x_copy,Dims); ImageSmooth(d_v_y, d_v_y_copy,Dims); ImageSmooth(d_v_z, d_v_z_copy,Dims); hipLaunchKernelGGL(( flowComp), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_mv_x, d_mv_y, d_mv_z, d_v_x_copy, d_v_y_copy, d_v_z_copy, d_v_x, d_v_y, NX, NY, NZ); // NOTE: d_v_x is Jacobian, d_v_y is the max flow // d_v_x_copy, d_v_y_copy, d_v_z_copy are the displacement thrust :: device_ptr<float> data_ptr(d_v_y); int maxInd = hipblasIsamax(NX*NY*NZ, d_v_y, 1) -1; float maxflow = data_ptr[maxInd]; float dt = (du/maxflow); // > 1) ? 1 : du/maxflow; printf("dt = %f \n", dt); hipLaunchKernelGGL(( flowUpdate), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_mv_x, d_mv_y, d_mv_z, d_v_x_copy, d_v_y_copy, d_v_z_copy,dt, NX, NY, NZ); // regridding if Jacobian < threshJaco sum = hipblasSasum(NX*NY*NZ, d_v_x, 1); if (sum>0.5) { regrid ++; printf("regrid = %d\n", regrid); // save d_im_move to be d_im_out hipUnbindTexture(d_im_move_tex); hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr((void*)d_im_out, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams.dstArray = d_im_move_array; copyParams.extent = volumeSize; copyParams.kind = hipMemcpyDeviceToDevice; cutilSafeCall( hipMemcpy3D(&copyParams) ); cutilSafeCall(hipBindTextureToArray(d_im_move_tex, d_im_move_array)); // update vector flow hipLaunchKernelGGL(( ImageWarp_mv), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_mv_x, d_mv_y, d_mv_z, NX, NY, NZ); hipMemcpy3DParms copyParams_x = {0}; copyParams_x.srcPtr = make_hipPitchedPtr((void*)d_mv_x, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams_x.dstArray = d_mv_x_array; copyParams_x.extent = volumeSize; copyParams_x.kind = hipMemcpyDeviceToDevice; cutilSafeCall( hipMemcpy3D(&copyParams_x) ); cutilSafeCall(hipBindTextureToArray(d_mv_x_tex, d_mv_x_array)); hipMemcpy3DParms copyParams_y = {0}; copyParams_y.srcPtr = make_hipPitchedPtr((void*)d_mv_y, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams_y.dstArray = d_mv_y_array; copyParams_y.extent = volumeSize; copyParams_y.kind = hipMemcpyDeviceToDevice; cutilSafeCall( hipMemcpy3D(&copyParams_y) ); cutilSafeCall(hipBindTextureToArray(d_mv_y_tex, d_mv_y_array)); hipMemcpy3DParms copyParams_z = {0}; copyParams_z.srcPtr = make_hipPitchedPtr((void*)d_mv_z, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams_z.dstArray = d_mv_z_array; copyParams_z.extent = volumeSize; copyParams_z.kind = hipMemcpyDeviceToDevice; cutilSafeCall( hipMemcpy3D(&copyParams_z) ); cutilSafeCall(hipBindTextureToArray(d_mv_z_tex, d_mv_z_array)); cutilSafeCall( hipMemset(d_mv_x, 0, sDATA_SIZE) ); cutilSafeCall( hipMemset(d_mv_y, 0, sDATA_SIZE) ); cutilSafeCall( hipMemset(d_mv_z, 0, sDATA_SIZE) ); } // end for regridding } // for-loop iteration if (!regrid) { hipLaunchKernelGGL(( ImageWarp), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_mv_x, d_mv_y, d_mv_z, d_im_move, NX, NY, NZ); } else { hipMemcpy3DParms copyParams = {0}; hipUnbindTexture(d_im_move_tex); 
copyParams.srcPtr = make_hipPitchedPtr((void*)d_im_move, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams.dstArray = d_im_move_array; copyParams.extent = volumeSize; copyParams.kind = hipMemcpyDeviceToDevice; cutilSafeCall( hipMemcpy3D(&copyParams) ); cutilSafeCall(hipBindTextureToArray(d_im_move_tex, d_im_move_array)); hipLaunchKernelGGL(( ImageWarp_final), dim3(nblocks), dim3(NTHREAD_PER_BLOCK), 0, 0, d_mv_x, d_mv_y, d_mv_z,d_im_move, NX, NY, NZ); } hipFree(d_points); hipFree(d_v_x); hipFree(d_v_y); hipFree(d_v_z); hipFree(d_v_x_copy); hipFree(d_v_y_copy); hipFree(d_v_z_copy); hipUnbindTexture(d_im_move_tex); hipFreeArray(d_im_move_array); hipUnbindTexture(d_mv_x_tex); hipFreeArray(d_mv_x_array); hipUnbindTexture(d_mv_y_tex); hipFreeArray(d_mv_y_array); hipUnbindTexture(d_mv_z_tex); hipFreeArray(d_mv_z_array); hipFree(d_im_out); } __global__ void transToFloat2(const float *input1, const float *input2, float2 *output, const int n) { const int tid = (blockIdx.y*NBLOCKX + blockIdx.x)*blockDim.x + threadIdx.x; // obtain current id on thread if (tid < n) { output[tid] = make_float2(input1[tid], input2[tid]); } }
91ecb93c0d1cc35d152e5ed7e89eb5242f31e9de.cu
/******************************************************************* c* Multimodal Deformable Image Registration * c* via Mutual Information or Bhattacharyya Distantce * c* Version: 1.0 * c* Language: C, CUDA * c* * c* Developer: Yifei Lou * c* Email: [email protected] * c* * c* School of Electrical and Computer Engineering * c* Georgia Institute of Technology * c* Atlanta, GA, 30318 * c* Website: http://groups.bme.gatech.edu/groups/bil/ * c* * c* Copyright (c) 2011 * c* All rights reserved. * c* * c* Permission to use, copy, or modify this code and its * c* documentation for scientific purpose is hereby granted * c* without fee, provided that this copyright notice appear in * c* all copies and that both that copyright notice and this * c* permission notice appear in supporting documentation. The use * c* for commercial purposes is prohibited without permission. * c* * c* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND * c* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * c* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * c* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * c* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * c* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * c* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT * c* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF* c* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * c* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * c* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * c* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * c* THE POSSIBILITY OF SUCH DAMAGE. * c* * c******************************************************************/ /******************************************************************* c* Short discription * c* main function to register two images on the current scale * c* including upsample and downsample * c******************************************************************/ #include <thrust/binary_search.h> #include <thrust/device_vector.h> #include <thrust/functional.h> #include <thrust/sort.h> #include <cublas.h> #include <cutil.h> #include <cutil_inline.h> #include "viscous_convolution.h" #include "viscous_global.h" // hash a point in the unit square to the index of // the grid bucket that contains it struct point_to_bucket_index : public thrust::unary_function<float2,unsigned int> { __host__ __device__ point_to_bucket_index(unsigned int width, unsigned int height) :w(width),h(height){} __host__ __device__ unsigned int operator()(float2 p) const { // find the raster indices of p's bucket unsigned int x = static_cast<unsigned int>(p.x * (w-1)); unsigned int y = static_cast<unsigned int>(p.y * (h-1)); // return the bucket's linear index return y * w + x; } unsigned int w, h; }; __global__ void downSample(float *src, float *dest, int NX, int NY, int NZ, int s) { const int tid = (blockIdx.y*NBLOCKX + blockIdx.x)*blockDim.x + threadIdx.x; if(tid < NX*NY*NZ) { int z = tid/(NX*NY); int y = (tid%(NX*NY))/NX; int x = tid%NX; float sum =0.0f; for(int xs = 0; xs<s; xs++) for(int ys =0; ys<s; ys++) sum += src[s*x+xs + (s*y+ys)*NX0 + s*z*NX0*NY0]; dest[tid] = sum/s/s; } } __global__ void upSample(float *src, float *dest, int NX, int NY, int NZ) // upsampling { const int tid = (blockIdx.y*NBLOCKX + blockIdx.x)*blockDim.x + threadIdx.x; if(tid < NX*NY*NZ) { int z = tid/(NX*NY); int y = (tid%(NX*NY))/NX; int x = tid%NX; int xmin = x/2 - (x%2 == 0); int xmax = x/2 + (x%2 == 
1); int ymin = y/2 - (y%2 == 0); int ymax = y/2 + (y%2 == 1); int zmin = z/2 - (z%2 == 0); int zmax = z/2 + (z%2 == 1); xmin = (xmin < 0) ? 0: xmin; ymin = (ymin < 0) ? 0: ymin; zmin = (zmin < 0) ? 0: zmin; xmax = (xmax < NX)? xmax : NX-1; ymax = (ymax < NY)? ymax : NY-1; zmax = (zmax < NZ)? zmax : NZ-1; float wx = 0.25 + 0.5*(x%2==0); float wy = 0.25 + 0.5*(y%2==0); float wz = 0.25 + 0.5*(z%2==0); dest[tid] = src[xmin + ymin*NX/2 + zmin*NX/2*NY/2] * (1.0 - wx) * (1.0-wy) * (1.0-wz) + src[xmax + ymin*NX/2 + zmin*NX/2*NY/2] * wx * (1.0-wy) * (1.0-wz) + src[xmin + ymax*NX/2 + zmin*NX/2*NY/2] * (1.0 - wx) * wy * (1.0-wz) + src[xmax + ymax*NX/2 + zmin*NX/2*NY/2] * wx * wy * (1.0-wz) + src[xmin + ymin*NX/2 + zmax*NX/2*NY/2] * (1.0 - wx) * (1.0-wy) * wz + src[xmax + ymin*NX/2 + zmax*NX/2*NY/2] * wx * (1.0-wy) * wz + src[xmin + ymax*NX/2 + zmax*NX/2*NY/2] * (1.0 - wx) * wy * wz + src[xmax + ymax*NX/2 + zmax*NX/2*NY/2] * wx * wy * wz; dest[tid] = 2*dest[tid]; } } void compute(float *d_im_move, float *d_im_static, float *d_mv_x, float *d_mv_y, float *d_mv_z, int maxIter) // d_mv_x, d_mv_y and d_im_move are updated { // bind moving image to texture const cudaExtent volumeSize = make_cudaExtent(NX, NY, NZ); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); cutilSafeCall( cudaMalloc3DArray(&d_im_move_array, &channelDesc, volumeSize) ); cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr((void*)d_im_move, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams.dstArray = d_im_move_array; copyParams.extent = volumeSize; copyParams.kind = cudaMemcpyDeviceToDevice; cutilSafeCall( cudaMemcpy3D(&copyParams) ); d_im_move_tex.normalized = false; d_im_move_tex.filterMode = cudaFilterModeLinear; cutilSafeCall(cudaBindTextureToArray(d_im_move_tex, d_im_move_array, channelDesc)); // bind vector flows to texture cutilSafeCall( cudaMalloc3DArray(&d_mv_x_array, &channelDesc, volumeSize) ); cudaMemcpy3DParms copyParams_x = {0}; copyParams_x.srcPtr = make_cudaPitchedPtr((void*)d_mv_x, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams_x.dstArray = d_mv_x_array; copyParams_x.extent = volumeSize; copyParams_x.kind = cudaMemcpyDeviceToDevice; cutilSafeCall( cudaMemcpy3D(&copyParams_x) ); d_mv_x_tex.normalized = false; d_mv_x_tex.filterMode = cudaFilterModeLinear; cutilSafeCall( cudaMalloc3DArray(&d_mv_y_array, &channelDesc, volumeSize) ); cudaMemcpy3DParms copyParams_y = {0}; copyParams_y.srcPtr = make_cudaPitchedPtr((void*)d_mv_y, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams_y.dstArray = d_mv_y_array; copyParams_y.extent = volumeSize; copyParams_y.kind = cudaMemcpyDeviceToDevice; cutilSafeCall( cudaMemcpy3D(&copyParams_y) ); d_mv_y_tex.normalized = false; d_mv_y_tex.filterMode = cudaFilterModeLinear; cutilSafeCall( cudaMalloc3DArray(&d_mv_z_array, &channelDesc, volumeSize) ); cudaMemcpy3DParms copyParams_z = {0}; copyParams_z.srcPtr = make_cudaPitchedPtr((void*)d_mv_z, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams_z.dstArray = d_mv_z_array; copyParams_z.extent = volumeSize; copyParams_z.kind = cudaMemcpyDeviceToDevice; cutilSafeCall( cudaMemcpy3D(&copyParams_z) ); d_mv_z_tex.normalized = false; d_mv_z_tex.filterMode = cudaFilterModeLinear; float *d_im_out; cutilSafeCall( cudaMalloc((void **)&d_im_out, sDATA_SIZE) ); // velocity float *d_v_x, *d_v_x_copy; float *d_v_y, *d_v_y_copy; float *d_v_z, *d_v_z_copy; cutilSafeCall( cudaMalloc((void **)&d_v_x, 
sDATA_SIZE) ); cutilSafeCall( cudaMalloc((void **)&d_v_y, sDATA_SIZE) ); cutilSafeCall( cudaMalloc((void **)&d_v_z, sDATA_SIZE) ); cutilSafeCall( cudaMalloc((void **)&d_v_x_copy, sDATA_SIZE) ); cutilSafeCall( cudaMalloc((void **)&d_v_y_copy, sDATA_SIZE) ); cutilSafeCall( cudaMalloc((void **)&d_v_z_copy, sDATA_SIZE) ); // setup for computing joint histogram via thrust // the grid data structure keeps a range per grid bucket: // each bucket_begin[i] indexes the first element of bucket i's list of points // each bucket_end[i] indexes one past the last element of bucket i's list of points thrust::device_vector<unsigned int> bucket_begin(nBin*nBin); thrust::device_vector<unsigned int> bucket_end(nBin*nBin); // allocate storage for each point's bucket index thrust::device_vector<unsigned int> bucket_indices(NX*NY*NZ); // allocate space to hold per-bucket sizes thrust::device_vector<unsigned int> bucket_sizes(nBin*nBin); // allocate float2 vector float2 *d_points; cudaMalloc((void**) &d_points, sizeof(float2)*NX*NY*NZ); int regrid = 0; float MI[1000]; int3 Dims; Dims.x = NX; Dims.y = NY; Dims.z = NZ; for(int it=0; it<maxIter; it++) { // upate image ImageWarp<<<nblocks, NTHREAD_PER_BLOCK>>>(d_mv_x, d_mv_y, d_mv_z, d_im_out, NX, NY, NZ); // joint histogram via thrust ----- begin // convert to float2 vector transToFloat2<<<nblocks, NTHREAD_PER_BLOCK>>>(d_im_out, d_im_static, d_points, NX*NY*NZ); // use a thrust ptr to wrap the raw pointer thrust::device_ptr<float2> points_t(d_points); // transform the points to their bucket indices thrust::transform(points_t, points_t+NX*NY*NZ, bucket_indices.begin(), point_to_bucket_index(nBin,nBin)); // sort the bucket index thrust::sort(bucket_indices.begin(), bucket_indices.end()); // find the beginning of each bucket's list of points thrust::counting_iterator<unsigned int> search_begin(0); thrust::lower_bound(bucket_indices.begin(), bucket_indices.end(), search_begin, search_begin + nBin*nBin, bucket_begin.begin()); // find the end of each bucket's list of points thrust::upper_bound(bucket_indices.begin(), bucket_indices.end(), search_begin, search_begin + nBin*nBin, bucket_end.begin()); // take the difference between bounds to find each bucket size thrust::transform(bucket_end.begin(), bucket_end.end(), bucket_begin.begin(), bucket_sizes.begin(), thrust :: minus<unsigned int>()); // now hist contains the histogram unsigned int *hist = thrust::raw_pointer_cast(&bucket_sizes[0]); copyHist<<<nblocks_hist, NTHREAD_PER_BLOCK>>>(hist, d_jointHistogram); // joint histogram via thrust ----- end // compute the convolution of joint histogram myconv2dGPU<<<nblocks_hist, NTHREAD_PER_BLOCK>>>(d_jointHistogram, d_jointHistogram_conv, GaussKernelH, nBin, nBin, 3*hValue); // normalize joint histogram float sum = cublasSasum (nBin*nBin, d_jointHistogram_conv , 1); cublasSscal (nBin*nBin, 1.0f/sum, d_jointHistogram_conv, 1); // compute mutual info by GPU marginalDist<<<nBin, nBin>>>(d_jointHistogram_conv, d_probx, d_proby); switch (METHOD) { case 1: marginalBnorm_sum<<<nblocks_hist, NTHREAD_PER_BLOCK>>>(d_jointHistogram_conv, d_probx, d_proby, d_jointHistogram); marginalDistAlongY<<<nBin, nBin>>>(d_jointHistogram, d_Bsum); BnormGPU<<<nblocks_hist, NTHREAD_PER_BLOCK>>>(d_jointHistogram_conv, d_probx, d_proby,d_Bsum, d_jointHistogram); break; case 2: mutualInfoGPU<<<nblocks_hist, NTHREAD_PER_BLOCK>>>(d_jointHistogram_conv, d_probx, d_proby, d_jointHistogram); break; } MI[it] = cublasSasum (nBin*nBin, d_jointHistogram_conv, 1); printf("mutual information (%d)= %f\n", it, MI[it]); 
// NOTE: after this step, jointHistogram becomes the likelihood // compute the first derivative w.r.t. x-dim of joint histogram myconv2dGPU<<<nblocks_hist, NTHREAD_PER_BLOCK>>>(d_jointHistogram, d_jointHistogram_conv, GaussKernelHx, nBin, nBin,3*hValue); // compute the force forceComp<<<nblocks, NTHREAD_PER_BLOCK>>>(d_im_out, d_im_static, d_jointHistogram_conv, d_v_x, d_v_y, d_v_z, NX, NY, NZ); ImageSmooth(d_v_x, d_v_x_copy,Dims); ImageSmooth(d_v_y, d_v_y_copy,Dims); ImageSmooth(d_v_z, d_v_z_copy,Dims); flowComp<<<nblocks, NTHREAD_PER_BLOCK>>>(d_mv_x, d_mv_y, d_mv_z, d_v_x_copy, d_v_y_copy, d_v_z_copy, d_v_x, d_v_y, NX, NY, NZ); // NOTE: d_v_x is Jacobian, d_v_y is the max flow // d_v_x_copy, d_v_y_copy, d_v_z_copy are the displacement thrust :: device_ptr<float> data_ptr(d_v_y); int maxInd = cublasIsamax(NX*NY*NZ, d_v_y, 1) -1; float maxflow = data_ptr[maxInd]; float dt = (du/maxflow); // > 1) ? 1 : du/maxflow; printf("dt = %f \n", dt); flowUpdate<<<nblocks, NTHREAD_PER_BLOCK>>>(d_mv_x, d_mv_y, d_mv_z, d_v_x_copy, d_v_y_copy, d_v_z_copy,dt, NX, NY, NZ); // regridding if Jacobian < threshJaco sum = cublasSasum(NX*NY*NZ, d_v_x, 1); if (sum>0.5) { regrid ++; printf("regrid = %d\n", regrid); // save d_im_move to be d_im_out cudaUnbindTexture(d_im_move_tex); cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr((void*)d_im_out, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams.dstArray = d_im_move_array; copyParams.extent = volumeSize; copyParams.kind = cudaMemcpyDeviceToDevice; cutilSafeCall( cudaMemcpy3D(&copyParams) ); cutilSafeCall(cudaBindTextureToArray(d_im_move_tex, d_im_move_array)); // update vector flow ImageWarp_mv<<<nblocks, NTHREAD_PER_BLOCK>>>(d_mv_x, d_mv_y, d_mv_z, NX, NY, NZ); cudaMemcpy3DParms copyParams_x = {0}; copyParams_x.srcPtr = make_cudaPitchedPtr((void*)d_mv_x, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams_x.dstArray = d_mv_x_array; copyParams_x.extent = volumeSize; copyParams_x.kind = cudaMemcpyDeviceToDevice; cutilSafeCall( cudaMemcpy3D(&copyParams_x) ); cutilSafeCall(cudaBindTextureToArray(d_mv_x_tex, d_mv_x_array)); cudaMemcpy3DParms copyParams_y = {0}; copyParams_y.srcPtr = make_cudaPitchedPtr((void*)d_mv_y, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams_y.dstArray = d_mv_y_array; copyParams_y.extent = volumeSize; copyParams_y.kind = cudaMemcpyDeviceToDevice; cutilSafeCall( cudaMemcpy3D(&copyParams_y) ); cutilSafeCall(cudaBindTextureToArray(d_mv_y_tex, d_mv_y_array)); cudaMemcpy3DParms copyParams_z = {0}; copyParams_z.srcPtr = make_cudaPitchedPtr((void*)d_mv_z, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams_z.dstArray = d_mv_z_array; copyParams_z.extent = volumeSize; copyParams_z.kind = cudaMemcpyDeviceToDevice; cutilSafeCall( cudaMemcpy3D(&copyParams_z) ); cutilSafeCall(cudaBindTextureToArray(d_mv_z_tex, d_mv_z_array)); cutilSafeCall( cudaMemset(d_mv_x, 0, sDATA_SIZE) ); cutilSafeCall( cudaMemset(d_mv_y, 0, sDATA_SIZE) ); cutilSafeCall( cudaMemset(d_mv_z, 0, sDATA_SIZE) ); } // end for regridding } // for-loop iteration if (!regrid) { ImageWarp<<<nblocks, NTHREAD_PER_BLOCK>>>(d_mv_x, d_mv_y, d_mv_z, d_im_move, NX, NY, NZ); } else { cudaMemcpy3DParms copyParams = {0}; cudaUnbindTexture(d_im_move_tex); copyParams.srcPtr = make_cudaPitchedPtr((void*)d_im_move, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height); copyParams.dstArray = d_im_move_array; copyParams.extent = volumeSize; 
copyParams.kind = cudaMemcpyDeviceToDevice; cutilSafeCall( cudaMemcpy3D(&copyParams) ); cutilSafeCall(cudaBindTextureToArray(d_im_move_tex, d_im_move_array)); ImageWarp_final<<<nblocks, NTHREAD_PER_BLOCK>>>(d_mv_x, d_mv_y, d_mv_z,d_im_move, NX, NY, NZ); } cudaFree(d_points); cudaFree(d_v_x); cudaFree(d_v_y); cudaFree(d_v_z); cudaFree(d_v_x_copy); cudaFree(d_v_y_copy); cudaFree(d_v_z_copy); cudaUnbindTexture(d_im_move_tex); cudaFreeArray(d_im_move_array); cudaUnbindTexture(d_mv_x_tex); cudaFreeArray(d_mv_x_array); cudaUnbindTexture(d_mv_y_tex); cudaFreeArray(d_mv_y_array); cudaUnbindTexture(d_mv_z_tex); cudaFreeArray(d_mv_z_array); cudaFree(d_im_out); } __global__ void transToFloat2(const float *input1, const float *input2, float2 *output, const int n) { const int tid = (blockIdx.y*NBLOCKX + blockIdx.x)*blockDim.x + threadIdx.x; // obtain current id on thread if (tid < n) { output[tid] = make_float2(input1[tid], input2[tid]); } }
8b904f9cfd529f45270bf268f2336c8511dc9805.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <errno.h> #include <getopt.h> #include <limits.h> #include <math.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "c63_cuda.h" #include "me.h" #include "sisci_common.h" namespace gpu = c63::gpu; static const int Y = Y_COMPONENT; static const int U = U_COMPONENT; static const int V = V_COMPONENT; __device__ static void min_warp_reduce(int i, volatile int* values) { values[i] = min(values[i], values[i + 32]); values[i] = min(values[i], values[i + 16]); values[i] = min(values[i], values[i + 8]); values[i] = min(values[i], values[i + 4]); values[i] = min(values[i], values[i + 2]); values[i] = min(values[i], values[i + 1]); } template<int block_size> __device__ static void min_reduce(int i, int* values) { if (i < block_size/2) { // Intentionally no break between cases switch (block_size) { case 1024: values[i] = min(values[i], values[i + 512]); __syncthreads(); case 512: values[i] = min(values[i], values[i + 256]); __syncthreads(); case 256: values[i] = min(values[i], values[i + 128]); __syncthreads(); case 128: values[i] = min(values[i], values[i + 64]); __syncthreads(); } if (i < 32) { min_warp_reduce(i, values); } } else { switch (block_size) { case 1024: __syncthreads(); case 512: __syncthreads(); case 256: __syncthreads(); case 128: __syncthreads(); } } } template<int range> __global__ static void me_block_8x8_gpu_Y(const uint8_t* __restrict__ orig, const uint8_t* __restrict__ ref, const int* __restrict__ lefts, const int* __restrict__ rights, const int* __restrict__ tops, const int* __restrict__ bottoms, int w, unsigned int* __restrict__ index_results) { const int i = threadIdx.x; const int j = threadIdx.y; const int tid = j*blockDim.x + i; const int ref_mb_id = j*4*blockDim.x + i; const int ref_mb2_id = (j*4+1)*blockDim.x + i; const int ref_mb3_id = (j*4+2)*blockDim.x + i; const int ref_mb4_id = (j*4+3)*blockDim.x + i; const int mb_x = blockIdx.x; const int mb_y = blockIdx.y; const int orig_mb_id = mb_y*gridDim.x + mb_x; const int left = lefts[mb_x]; const int top = tops[mb_y]; const int right = rights[mb_x]; const int bottom = bottoms[mb_y]; const int mx = mb_x * 8; const int my = mb_y * 8; const uint8_t* orig_block = orig + my * w + mx; const uint8_t* ref_search_range = ref + top*w + left; __shared__ uint8_t shared_orig_block[64]; if (i < 8 && j < 8) { shared_orig_block[j*8 + i] = orig_block[j*w + i]; } __syncthreads(); int block_sad = INT_MAX; int block2_sad = INT_MAX; int block3_sad = INT_MAX; int block4_sad = INT_MAX; const int range_width = right - left; const int range_height = (bottom - top)/4; const unsigned int mask = 0x3210 + 0x1111 * (i%4); // (i/4)*4 rounds i down to the nearest integer divisible by 4 const uint8_t* ref_block_top_row_aligned = ref_search_range + (j*4)*w + (i/4)*4; if (j < range_height && i < range_width) { block_sad = 0; block2_sad = 0; block3_sad = 0; block4_sad = 0; #pragma unroll for (int y = 0; y < 8; ++y) { uint32_t* ref_block_row_aligned = (uint32_t*) (ref_block_top_row_aligned + y*w); uint32_t ref_row_left = __byte_perm(ref_block_row_aligned[0], ref_block_row_aligned[1], mask); uint32_t ref_row_right = __byte_perm(ref_block_row_aligned[1], ref_block_row_aligned[2], mask); uint32_t* ref_block2_row_aligned = (uint32_t*) (ref_block_top_row_aligned + (y+1)*w); uint32_t ref_row2_left = __byte_perm(ref_block2_row_aligned[0], ref_block2_row_aligned[1], mask); uint32_t ref_row2_right = 
__byte_perm(ref_block2_row_aligned[1], ref_block2_row_aligned[2], mask); uint32_t* ref_block3_row_aligned = (uint32_t*) (ref_block_top_row_aligned + (y+2)*w); uint32_t ref_row3_left = __byte_perm(ref_block3_row_aligned[0], ref_block3_row_aligned[1], mask); uint32_t ref_row3_right = __byte_perm(ref_block3_row_aligned[1], ref_block3_row_aligned[2], mask); uint32_t* ref_block4_row_aligned = (uint32_t*) (ref_block_top_row_aligned + (y+3)*w); uint32_t ref_row4_left = __byte_perm(ref_block4_row_aligned[0], ref_block4_row_aligned[1], mask); uint32_t ref_row4_right = __byte_perm(ref_block4_row_aligned[1], ref_block4_row_aligned[2], mask); uint8_t* orig_block_row = shared_orig_block + y*8; uint32_t orig_row_left = *((uint32_t*) orig_block_row); uint32_t orig_row_right = *((uint32_t*) orig_block_row + 1); block_sad += __vsadu4(ref_row_left, orig_row_left); block_sad += __vsadu4(ref_row_right, orig_row_right); block2_sad += __vsadu4(ref_row2_left, orig_row_left); block2_sad += __vsadu4(ref_row2_right, orig_row_right); block3_sad += __vsadu4(ref_row3_left, orig_row_left); block3_sad += __vsadu4(ref_row3_right, orig_row_right); block4_sad += __vsadu4(ref_row4_left, orig_row_left); block4_sad += __vsadu4(ref_row4_right, orig_row_right); } } __shared__ int block_sads[32*32]; block_sads[ref_mb_id] = block_sad; block_sads[ref_mb2_id] = block2_sad; block_sads[ref_mb3_id] = block3_sad; block_sads[ref_mb4_id] = block4_sad; __syncthreads(); block_sads[tid] = min(block_sads[tid], block_sads[tid + 512]); block_sads[tid + 256] = min(block_sads[tid + 256], block_sads[tid + 768]); __syncthreads(); block_sads[tid] = min(block_sads[tid], block_sads[tid + 256]); __syncthreads(); if (tid < 128) { block_sads[tid] = min(block_sads[tid], block_sads[tid + 128]); } __syncthreads(); if (tid < 64) { block_sads[tid] = min(block_sads[tid], block_sads[tid + 64]); } __syncthreads(); if (tid < 32) { min_warp_reduce(tid, block_sads); } __syncthreads(); int min = block_sads[0]; if (block_sad == min) { atomicMin(index_results + orig_mb_id, ref_mb_id); } if (block2_sad == min) { atomicMin(index_results + orig_mb_id, ref_mb2_id); } if (block3_sad == min) { atomicMin(index_results + orig_mb_id, ref_mb3_id); } if (block4_sad == min) { atomicMin(index_results + orig_mb_id, ref_mb4_id); } } template<int range> __global__ static void me_block_8x8_gpu_UV(const uint8_t* __restrict__ orig, const uint8_t* __restrict__ ref, const int* __restrict__ lefts, const int* __restrict__ rights, const int* __restrict__ tops, const int* __restrict__ bottoms, int w, unsigned int* __restrict__ index_results) { const int i = threadIdx.x; const int j = threadIdx.y; const int ref_mb_id = j*blockDim.x + i; const int mb_x = blockIdx.x; const int mb_y = blockIdx.y; const int orig_mb_id = mb_y*gridDim.x + mb_x; const int left = lefts[mb_x]; const int top = tops[mb_y]; const int right = rights[mb_x]; const int bottom = bottoms[mb_y]; const int mx = mb_x * 8; const int my = mb_y * 8; const uint8_t* orig_block = orig + my * w + mx; const uint8_t* ref_search_range = ref + top*w + left; __shared__ uint8_t shared_orig_block[64]; if (i < 8 && j < 8) { shared_orig_block[j*8 + i] = orig_block[j*w + i]; } __syncthreads(); int block_sad = INT_MAX; const int range_width = right - left; const int range_height = bottom - top; const unsigned int mask = 0x3210 + 0x1111 * (i%4); // (i/4)*4 rounds i down to the nearest integer divisible by 4 const uint8_t* ref_block_top_row_aligned = ref_search_range + j*w + (i/4)*4; if (j < range_height && i < range_width) { block_sad = 0; #pragma 
unroll for (unsigned int y = 8; y--; ) { uint32_t* ref_block_row_aligned = (uint32_t*) (ref_block_top_row_aligned + y*w); uint32_t ref_row_left = __byte_perm(ref_block_row_aligned[0], ref_block_row_aligned[1], mask); uint32_t ref_row_right = __byte_perm(ref_block_row_aligned[1], ref_block_row_aligned[2], mask); uint8_t* orig_block_row = shared_orig_block + y*8; uint32_t orig_row_left = *((uint32_t*) orig_block_row); uint32_t orig_row_right = *((uint32_t*) orig_block_row + 1); block_sad += __vsadu4(ref_row_left, orig_row_left); block_sad += __vsadu4(ref_row_right, orig_row_right); } } const int max_range_width = range * 2; const int max_range_height = range * 2; const int max_mb_count = max_range_width * max_range_height; __shared__ int block_sads[max_mb_count]; block_sads[ref_mb_id] = block_sad; __syncthreads(); min_reduce<max_mb_count>(ref_mb_id, block_sads); __syncthreads(); if (block_sad == block_sads[0]) { atomicMin(index_results + orig_mb_id, ref_mb_id); } } template<int range> __global__ static void set_motion_vectors(struct macroblock* __restrict__ mbs, const int* __restrict__ lefts, const int* __restrict__ tops, const unsigned int* __restrict__ index_results) { const int mb_x = blockIdx.x; const int mb_y = threadIdx.x; const int orig_mb_id = mb_y*gridDim.x + mb_x; const int left = lefts[mb_x]; const int top = tops[mb_y]; const int mx = mb_x * 8; const int my = mb_y * 8; int index_result = index_results[orig_mb_id]; /* Here, there should be a threshold on SAD that checks if the motion vector is cheaper than intraprediction. We always assume MV to be beneficial */ struct macroblock* mb = &mbs[orig_mb_id]; mb->use_mv = 1; mb->mv_x = left + (index_result % (range*2)) - mx; mb->mv_y = top + (index_result / (range*2)) - my; } template<int component> void gpu::c63_motion_estimate(struct c63_common *cm, const struct c63_common_gpu& cm_gpu, const struct c63_cuda& c63_cuda) { const int w = cm->padw[component]; const int cols = cm->mb_cols[component]; const int rows = cm->mb_rows[component]; const int range = ME_RANGE(component); const struct boundaries& bound = cm_gpu.me_boundaries[component]; const hipStream_t stream = c63_cuda.stream[component]; unsigned int* sad_indexes = cm_gpu.sad_index_results[component]; struct macroblock* mb = cm->curframe->mbs[component]; struct macroblock* mb_gpu = cm->curframe->mbs_gpu[component]; uint8_t* orig; uint8_t* ref; switch (component) { case Y_COMPONENT: orig = (uint8_t*) cm->curframe->orig_gpu->Y; ref = cm->refframe->recons_gpu->Y; break; case U_COMPONENT: orig = (uint8_t*) cm->curframe->orig_gpu->U; ref = cm->refframe->recons_gpu->U; break; case V_COMPONENT: orig = (uint8_t*) cm->curframe->orig_gpu->V; ref = cm->refframe->recons_gpu->V; break; } hipMemsetAsync(sad_indexes, 255, cols * rows * sizeof(unsigned int), stream); dim3 numBlocks(cols, rows); if (component == Y_COMPONENT) { // Luma dim3 threadsPerBlock(range * 2, range / 2); hipLaunchKernelGGL(( me_block_8x8_gpu_Y<range>), dim3(numBlocks), dim3(threadsPerBlock), 0, stream, orig, ref, bound.left, bound.right, bound.top, bound.bottom, w, sad_indexes); } else { // Chroma dim3 threadsPerBlock(range * 2, range * 2); hipLaunchKernelGGL(( me_block_8x8_gpu_UV<range>), dim3(numBlocks), dim3(threadsPerBlock), 0, stream, orig, ref, bound.left, bound.right, bound.top, bound.bottom, w, sad_indexes); } hipLaunchKernelGGL(( set_motion_vectors<range>), dim3(cols), dim3(rows), 0, stream, mb_gpu, bound.left, bound.top, sad_indexes); hipEvent_t me_done = c63_cuda.me_done[component]; hipEventRecord(me_done, 
stream); hipStream_t memcpy_stream = c63_cuda.memcpy_stream[component]; hipStreamWaitEvent(memcpy_stream, me_done, 0); hipMemcpyAsync(mb, mb_gpu, cols * rows * sizeof(struct macroblock), hipMemcpyDeviceToHost, memcpy_stream); } /* Motion compensation for 8x8 block */ __global__ static void mc_block_8x8_gpu(const struct macroblock* __restrict__ mbs, int w, uint8_t __restrict__ *predicted, const uint8_t __restrict__ *ref) { const int mb_index = (blockIdx.x + blockIdx.y * gridDim.x); const int block_offset = mb_index * blockDim.x * blockDim.y; const int i = threadIdx.y; const int j = threadIdx.x; const struct macroblock* mb = &mbs[mb_index]; // We always assume MV to be beneficial //if (!mb->use_mv) { // return; //} const int mv_x = mb->mv_x; const int mv_y = mb->mv_y; /* Copy pixel from ref mandated by MV */ predicted[block_offset + i * 8 + j] = ref[(i + blockIdx.y*8 + mv_y) * w + (j + blockIdx.x*8 + mv_x)]; } template<int component> void gpu::c63_motion_compensate(struct c63_common *cm, const struct c63_cuda& c63_cuda) { const int w = cm->padw[component]; const int h = cm->padh[component]; const struct macroblock* mb = cm->curframe->mbs_gpu[component]; const hipStream_t stream = c63_cuda.stream[component]; uint8_t* pred; uint8_t* ref; switch (component) { case Y_COMPONENT: pred = cm->curframe->predicted_gpu->Y; ref = cm->refframe->recons_gpu->Y; break; case U_COMPONENT: pred = cm->curframe->predicted_gpu->U; ref = cm->refframe->recons_gpu->U; break; case V_COMPONENT: pred = cm->curframe->predicted_gpu->V; ref = cm->refframe->recons_gpu->V; break; } const dim3 threadsPerBlock(8, 8); const dim3 numBlocks(w / threadsPerBlock.x, h / threadsPerBlock.y); hipLaunchKernelGGL(( mc_block_8x8_gpu), dim3(numBlocks), dim3(threadsPerBlock), 0, stream, mb, w, pred, ref); } template void gpu::c63_motion_estimate<Y>(struct c63_common *cm, const struct c63_common_gpu& cm_gpu, const struct c63_cuda& c63_cuda); template void gpu::c63_motion_estimate<U>(struct c63_common *cm, const struct c63_common_gpu& cm_gpu, const struct c63_cuda& c63_cuda); template void gpu::c63_motion_estimate<V>(struct c63_common *cm, const struct c63_common_gpu& cm_gpu, const struct c63_cuda& c63_cuda); template void gpu::c63_motion_compensate<Y>(struct c63_common *cm, const struct c63_cuda& c63_cuda); template void gpu::c63_motion_compensate<U>(struct c63_common *cm, const struct c63_cuda& c63_cuda); template void gpu::c63_motion_compensate<V>(struct c63_common *cm, const struct c63_cuda& c63_cuda);
8b904f9cfd529f45270bf268f2336c8511dc9805.cu
#include <assert.h> #include <errno.h> #include <getopt.h> #include <limits.h> #include <math.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "c63_cuda.h" #include "me.h" #include "sisci_common.h" namespace gpu = c63::gpu; static const int Y = Y_COMPONENT; static const int U = U_COMPONENT; static const int V = V_COMPONENT; __device__ static void min_warp_reduce(int i, volatile int* values) { values[i] = min(values[i], values[i + 32]); values[i] = min(values[i], values[i + 16]); values[i] = min(values[i], values[i + 8]); values[i] = min(values[i], values[i + 4]); values[i] = min(values[i], values[i + 2]); values[i] = min(values[i], values[i + 1]); } template<int block_size> __device__ static void min_reduce(int i, int* values) { if (i < block_size/2) { // Intentionally no break between cases switch (block_size) { case 1024: values[i] = min(values[i], values[i + 512]); __syncthreads(); case 512: values[i] = min(values[i], values[i + 256]); __syncthreads(); case 256: values[i] = min(values[i], values[i + 128]); __syncthreads(); case 128: values[i] = min(values[i], values[i + 64]); __syncthreads(); } if (i < 32) { min_warp_reduce(i, values); } } else { switch (block_size) { case 1024: __syncthreads(); case 512: __syncthreads(); case 256: __syncthreads(); case 128: __syncthreads(); } } } template<int range> __global__ static void me_block_8x8_gpu_Y(const uint8_t* __restrict__ orig, const uint8_t* __restrict__ ref, const int* __restrict__ lefts, const int* __restrict__ rights, const int* __restrict__ tops, const int* __restrict__ bottoms, int w, unsigned int* __restrict__ index_results) { const int i = threadIdx.x; const int j = threadIdx.y; const int tid = j*blockDim.x + i; const int ref_mb_id = j*4*blockDim.x + i; const int ref_mb2_id = (j*4+1)*blockDim.x + i; const int ref_mb3_id = (j*4+2)*blockDim.x + i; const int ref_mb4_id = (j*4+3)*blockDim.x + i; const int mb_x = blockIdx.x; const int mb_y = blockIdx.y; const int orig_mb_id = mb_y*gridDim.x + mb_x; const int left = lefts[mb_x]; const int top = tops[mb_y]; const int right = rights[mb_x]; const int bottom = bottoms[mb_y]; const int mx = mb_x * 8; const int my = mb_y * 8; const uint8_t* orig_block = orig + my * w + mx; const uint8_t* ref_search_range = ref + top*w + left; __shared__ uint8_t shared_orig_block[64]; if (i < 8 && j < 8) { shared_orig_block[j*8 + i] = orig_block[j*w + i]; } __syncthreads(); int block_sad = INT_MAX; int block2_sad = INT_MAX; int block3_sad = INT_MAX; int block4_sad = INT_MAX; const int range_width = right - left; const int range_height = (bottom - top)/4; const unsigned int mask = 0x3210 + 0x1111 * (i%4); // (i/4)*4 rounds i down to the nearest integer divisible by 4 const uint8_t* ref_block_top_row_aligned = ref_search_range + (j*4)*w + (i/4)*4; if (j < range_height && i < range_width) { block_sad = 0; block2_sad = 0; block3_sad = 0; block4_sad = 0; #pragma unroll for (int y = 0; y < 8; ++y) { uint32_t* ref_block_row_aligned = (uint32_t*) (ref_block_top_row_aligned + y*w); uint32_t ref_row_left = __byte_perm(ref_block_row_aligned[0], ref_block_row_aligned[1], mask); uint32_t ref_row_right = __byte_perm(ref_block_row_aligned[1], ref_block_row_aligned[2], mask); uint32_t* ref_block2_row_aligned = (uint32_t*) (ref_block_top_row_aligned + (y+1)*w); uint32_t ref_row2_left = __byte_perm(ref_block2_row_aligned[0], ref_block2_row_aligned[1], mask); uint32_t ref_row2_right = __byte_perm(ref_block2_row_aligned[1], ref_block2_row_aligned[2], mask); uint32_t* ref_block3_row_aligned 
= (uint32_t*) (ref_block_top_row_aligned + (y+2)*w); uint32_t ref_row3_left = __byte_perm(ref_block3_row_aligned[0], ref_block3_row_aligned[1], mask); uint32_t ref_row3_right = __byte_perm(ref_block3_row_aligned[1], ref_block3_row_aligned[2], mask); uint32_t* ref_block4_row_aligned = (uint32_t*) (ref_block_top_row_aligned + (y+3)*w); uint32_t ref_row4_left = __byte_perm(ref_block4_row_aligned[0], ref_block4_row_aligned[1], mask); uint32_t ref_row4_right = __byte_perm(ref_block4_row_aligned[1], ref_block4_row_aligned[2], mask); uint8_t* orig_block_row = shared_orig_block + y*8; uint32_t orig_row_left = *((uint32_t*) orig_block_row); uint32_t orig_row_right = *((uint32_t*) orig_block_row + 1); block_sad += __vsadu4(ref_row_left, orig_row_left); block_sad += __vsadu4(ref_row_right, orig_row_right); block2_sad += __vsadu4(ref_row2_left, orig_row_left); block2_sad += __vsadu4(ref_row2_right, orig_row_right); block3_sad += __vsadu4(ref_row3_left, orig_row_left); block3_sad += __vsadu4(ref_row3_right, orig_row_right); block4_sad += __vsadu4(ref_row4_left, orig_row_left); block4_sad += __vsadu4(ref_row4_right, orig_row_right); } } __shared__ int block_sads[32*32]; block_sads[ref_mb_id] = block_sad; block_sads[ref_mb2_id] = block2_sad; block_sads[ref_mb3_id] = block3_sad; block_sads[ref_mb4_id] = block4_sad; __syncthreads(); block_sads[tid] = min(block_sads[tid], block_sads[tid + 512]); block_sads[tid + 256] = min(block_sads[tid + 256], block_sads[tid + 768]); __syncthreads(); block_sads[tid] = min(block_sads[tid], block_sads[tid + 256]); __syncthreads(); if (tid < 128) { block_sads[tid] = min(block_sads[tid], block_sads[tid + 128]); } __syncthreads(); if (tid < 64) { block_sads[tid] = min(block_sads[tid], block_sads[tid + 64]); } __syncthreads(); if (tid < 32) { min_warp_reduce(tid, block_sads); } __syncthreads(); int min = block_sads[0]; if (block_sad == min) { atomicMin(index_results + orig_mb_id, ref_mb_id); } if (block2_sad == min) { atomicMin(index_results + orig_mb_id, ref_mb2_id); } if (block3_sad == min) { atomicMin(index_results + orig_mb_id, ref_mb3_id); } if (block4_sad == min) { atomicMin(index_results + orig_mb_id, ref_mb4_id); } } template<int range> __global__ static void me_block_8x8_gpu_UV(const uint8_t* __restrict__ orig, const uint8_t* __restrict__ ref, const int* __restrict__ lefts, const int* __restrict__ rights, const int* __restrict__ tops, const int* __restrict__ bottoms, int w, unsigned int* __restrict__ index_results) { const int i = threadIdx.x; const int j = threadIdx.y; const int ref_mb_id = j*blockDim.x + i; const int mb_x = blockIdx.x; const int mb_y = blockIdx.y; const int orig_mb_id = mb_y*gridDim.x + mb_x; const int left = lefts[mb_x]; const int top = tops[mb_y]; const int right = rights[mb_x]; const int bottom = bottoms[mb_y]; const int mx = mb_x * 8; const int my = mb_y * 8; const uint8_t* orig_block = orig + my * w + mx; const uint8_t* ref_search_range = ref + top*w + left; __shared__ uint8_t shared_orig_block[64]; if (i < 8 && j < 8) { shared_orig_block[j*8 + i] = orig_block[j*w + i]; } __syncthreads(); int block_sad = INT_MAX; const int range_width = right - left; const int range_height = bottom - top; const unsigned int mask = 0x3210 + 0x1111 * (i%4); // (i/4)*4 rounds i down to the nearest integer divisible by 4 const uint8_t* ref_block_top_row_aligned = ref_search_range + j*w + (i/4)*4; if (j < range_height && i < range_width) { block_sad = 0; #pragma unroll for (unsigned int y = 8; y--; ) { uint32_t* ref_block_row_aligned = (uint32_t*) 
(ref_block_top_row_aligned + y*w); uint32_t ref_row_left = __byte_perm(ref_block_row_aligned[0], ref_block_row_aligned[1], mask); uint32_t ref_row_right = __byte_perm(ref_block_row_aligned[1], ref_block_row_aligned[2], mask); uint8_t* orig_block_row = shared_orig_block + y*8; uint32_t orig_row_left = *((uint32_t*) orig_block_row); uint32_t orig_row_right = *((uint32_t*) orig_block_row + 1); block_sad += __vsadu4(ref_row_left, orig_row_left); block_sad += __vsadu4(ref_row_right, orig_row_right); } } const int max_range_width = range * 2; const int max_range_height = range * 2; const int max_mb_count = max_range_width * max_range_height; __shared__ int block_sads[max_mb_count]; block_sads[ref_mb_id] = block_sad; __syncthreads(); min_reduce<max_mb_count>(ref_mb_id, block_sads); __syncthreads(); if (block_sad == block_sads[0]) { atomicMin(index_results + orig_mb_id, ref_mb_id); } } template<int range> __global__ static void set_motion_vectors(struct macroblock* __restrict__ mbs, const int* __restrict__ lefts, const int* __restrict__ tops, const unsigned int* __restrict__ index_results) { const int mb_x = blockIdx.x; const int mb_y = threadIdx.x; const int orig_mb_id = mb_y*gridDim.x + mb_x; const int left = lefts[mb_x]; const int top = tops[mb_y]; const int mx = mb_x * 8; const int my = mb_y * 8; int index_result = index_results[orig_mb_id]; /* Here, there should be a threshold on SAD that checks if the motion vector is cheaper than intraprediction. We always assume MV to be beneficial */ struct macroblock* mb = &mbs[orig_mb_id]; mb->use_mv = 1; mb->mv_x = left + (index_result % (range*2)) - mx; mb->mv_y = top + (index_result / (range*2)) - my; } template<int component> void gpu::c63_motion_estimate(struct c63_common *cm, const struct c63_common_gpu& cm_gpu, const struct c63_cuda& c63_cuda) { const int w = cm->padw[component]; const int cols = cm->mb_cols[component]; const int rows = cm->mb_rows[component]; const int range = ME_RANGE(component); const struct boundaries& bound = cm_gpu.me_boundaries[component]; const cudaStream_t stream = c63_cuda.stream[component]; unsigned int* sad_indexes = cm_gpu.sad_index_results[component]; struct macroblock* mb = cm->curframe->mbs[component]; struct macroblock* mb_gpu = cm->curframe->mbs_gpu[component]; uint8_t* orig; uint8_t* ref; switch (component) { case Y_COMPONENT: orig = (uint8_t*) cm->curframe->orig_gpu->Y; ref = cm->refframe->recons_gpu->Y; break; case U_COMPONENT: orig = (uint8_t*) cm->curframe->orig_gpu->U; ref = cm->refframe->recons_gpu->U; break; case V_COMPONENT: orig = (uint8_t*) cm->curframe->orig_gpu->V; ref = cm->refframe->recons_gpu->V; break; } cudaMemsetAsync(sad_indexes, 255, cols * rows * sizeof(unsigned int), stream); dim3 numBlocks(cols, rows); if (component == Y_COMPONENT) { // Luma dim3 threadsPerBlock(range * 2, range / 2); me_block_8x8_gpu_Y<range><<<numBlocks, threadsPerBlock, 0, stream>>>(orig, ref, bound.left, bound.right, bound.top, bound.bottom, w, sad_indexes); } else { // Chroma dim3 threadsPerBlock(range * 2, range * 2); me_block_8x8_gpu_UV<range><<<numBlocks, threadsPerBlock, 0, stream>>>(orig, ref, bound.left, bound.right, bound.top, bound.bottom, w, sad_indexes); } set_motion_vectors<range><<<cols, rows, 0, stream>>>(mb_gpu, bound.left, bound.top, sad_indexes); cudaEvent_t me_done = c63_cuda.me_done[component]; cudaEventRecord(me_done, stream); cudaStream_t memcpy_stream = c63_cuda.memcpy_stream[component]; cudaStreamWaitEvent(memcpy_stream, me_done, 0); cudaMemcpyAsync(mb, mb_gpu, cols * rows * sizeof(struct 
macroblock), cudaMemcpyDeviceToHost, memcpy_stream); } /* Motion compensation for 8x8 block */ __global__ static void mc_block_8x8_gpu(const struct macroblock* __restrict__ mbs, int w, uint8_t __restrict__ *predicted, const uint8_t __restrict__ *ref) { const int mb_index = (blockIdx.x + blockIdx.y * gridDim.x); const int block_offset = mb_index * blockDim.x * blockDim.y; const int i = threadIdx.y; const int j = threadIdx.x; const struct macroblock* mb = &mbs[mb_index]; // We always assume MV to be beneficial //if (!mb->use_mv) { // return; //} const int mv_x = mb->mv_x; const int mv_y = mb->mv_y; /* Copy pixel from ref mandated by MV */ predicted[block_offset + i * 8 + j] = ref[(i + blockIdx.y*8 + mv_y) * w + (j + blockIdx.x*8 + mv_x)]; } template<int component> void gpu::c63_motion_compensate(struct c63_common *cm, const struct c63_cuda& c63_cuda) { const int w = cm->padw[component]; const int h = cm->padh[component]; const struct macroblock* mb = cm->curframe->mbs_gpu[component]; const cudaStream_t stream = c63_cuda.stream[component]; uint8_t* pred; uint8_t* ref; switch (component) { case Y_COMPONENT: pred = cm->curframe->predicted_gpu->Y; ref = cm->refframe->recons_gpu->Y; break; case U_COMPONENT: pred = cm->curframe->predicted_gpu->U; ref = cm->refframe->recons_gpu->U; break; case V_COMPONENT: pred = cm->curframe->predicted_gpu->V; ref = cm->refframe->recons_gpu->V; break; } const dim3 threadsPerBlock(8, 8); const dim3 numBlocks(w / threadsPerBlock.x, h / threadsPerBlock.y); mc_block_8x8_gpu<<<numBlocks, threadsPerBlock, 0, stream>>>(mb, w, pred, ref); } template void gpu::c63_motion_estimate<Y>(struct c63_common *cm, const struct c63_common_gpu& cm_gpu, const struct c63_cuda& c63_cuda); template void gpu::c63_motion_estimate<U>(struct c63_common *cm, const struct c63_common_gpu& cm_gpu, const struct c63_cuda& c63_cuda); template void gpu::c63_motion_estimate<V>(struct c63_common *cm, const struct c63_common_gpu& cm_gpu, const struct c63_cuda& c63_cuda); template void gpu::c63_motion_compensate<Y>(struct c63_common *cm, const struct c63_cuda& c63_cuda); template void gpu::c63_motion_compensate<U>(struct c63_common *cm, const struct c63_cuda& c63_cuda); template void gpu::c63_motion_compensate<V>(struct c63_common *cm, const struct c63_cuda& c63_cuda);
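The motion-estimation kernels above lean on two SIMD-in-a-word intrinsics: __byte_perm assembles a possibly unaligned 4-byte window from two aligned 32-bit loads, and __vsadu4 accumulates the sum of absolute differences of four packed bytes in one instruction. The standalone sketch below is illustration only, not part of the encoder; all names and sizes are made up. It computes the SAD of one 8-pixel row at an arbitrary byte offset and checks it against a scalar CPU loop.

// Minimal illustration of the __byte_perm / __vsadu4 packed-byte SAD idea.
#include <cstdio>
#include <cstdlib>
#include <cstdint>
#include <cuda_runtime.h>

__global__ void sad8(const uint8_t* ref, const uint8_t* orig, int offset, unsigned* out)
{
    // Aligned 32-bit view of the reference row; 'offset' may be unaligned (0..3 within a word).
    const uint32_t* ref32 = reinterpret_cast<const uint32_t*>(ref + (offset & ~3));
    const unsigned mask = 0x3210 + 0x1111 * (offset % 4);        // byte selector for __byte_perm

    uint32_t ref_left  = __byte_perm(ref32[0], ref32[1], mask);  // bytes offset .. offset+3
    uint32_t ref_right = __byte_perm(ref32[1], ref32[2], mask);  // bytes offset+4 .. offset+7

    const uint32_t* o32 = reinterpret_cast<const uint32_t*>(orig);
    unsigned sad = __vsadu4(ref_left,  o32[0])    // SAD of the first 4 byte pairs
                 + __vsadu4(ref_right, o32[1]);   // SAD of the last 4 byte pairs
    *out = sad;
}

int main()
{
    uint8_t h_ref[16], h_orig[8];
    for (int i = 0; i < 16; i++) h_ref[i]  = (uint8_t)(i * 7);
    for (int i = 0; i < 8;  i++) h_orig[i] = (uint8_t)(i * 5);

    uint8_t *d_ref, *d_orig; unsigned *d_out, h_out;
    cudaMalloc(&d_ref, 16); cudaMalloc(&d_orig, 8); cudaMalloc(&d_out, sizeof(unsigned));
    cudaMemcpy(d_ref, h_ref, 16, cudaMemcpyHostToDevice);
    cudaMemcpy(d_orig, h_orig, 8, cudaMemcpyHostToDevice);

    sad8<<<1, 1>>>(d_ref, d_orig, /*offset=*/3, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(unsigned), cudaMemcpyDeviceToHost);

    // Scalar CPU reference for the same 8 bytes starting at offset 3
    unsigned cpu = 0;
    for (int i = 0; i < 8; i++) cpu += (unsigned)abs((int)h_ref[3 + i] - (int)h_orig[i]);
    printf("gpu=%u cpu=%u\n", h_out, cpu);

    cudaFree(d_ref); cudaFree(d_orig); cudaFree(d_out);
    return 0;
}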
1a440e9caabf4bdd7406eb5abaa8772ea1d3f243.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "ode.h" #include "int_rungekutta4.h" #include "macro.h" #include "redutil2.h" using namespace redutil2; static const var_t lambda = 1.0/10.0; /* * Dormand, J. R.; Prince, P. J. * "New Runge-Kutta algorithms for numerical simulation in dynamical astronomy" * Celestial Mechanics, vol. 18, Oct. 1978, p. 223-232. * p. 225 Table I. Runge-Kutta 4(3)T */ // The Runge-Kutta matrix var_t int_rungekutta4::a[] = { 0.0, 0.0, 0.0, 0.0, // y = yn -> k1 1.0/2.0, 0.0, 0.0, 0.0, // y = ytmp = yn + h/2*k1 -> k2 0.0, 1.0/2.0, 0.0, 0.0, // y = ytmp = yn + h/2*k2 -> k3 0.0, 0.0, 1.0, 0.0, // y = ytmp = yn + h*k3 -> k4 /*---------------------------------------------------------------------------------------*/ 1.0/6.0, 1.0/3.0, 1.0/3.0, 1.0/6.0 // y = yn + h/6*(k1 + 2*k1 + 2*k3 + k4) -> k5 }; /* 5 x 4 matrix */ static uint16_t a_row = 5; static uint16_t a_col = 4; // weights var_t int_rungekutta4::bh[] = { 1.0/6.0, 1.0/3.0, 1.0/3.0, 1.0/6.0 }; // nodes var_t int_rungekutta4::c[] = { 0.0, 1.0/2.0, 1.0/2.0, 1.0, 1.0 }; // These arrays will contain the stepsize multiplied by the constants var_t int_rungekutta4::h_a[sizeof(int_rungekutta4::a) / sizeof(var_t)]; var_t int_rungekutta4::h_bh[sizeof(int_rungekutta4::bh) / sizeof(var_t)]; //__constant__ var_t dc_a[sizeof(int_rungekutta4::a) / sizeof(var_t)]; //__constant__ var_t dc_bh[sizeof(int_rungekutta4::bh) / sizeof(var_t)]; int_rungekutta4::int_rungekutta4(ode& f, bool adaptive, var_t tolerance, comp_dev_t comp_dev) : integrator(f, adaptive, tolerance, (adaptive ? 5 : 4), comp_dev) { name = "Runge-Kutta4"; n_order = 4; d_a = NULL; d_bh = NULL; check_Butcher_tableau(); if (PROC_UNIT_GPU == comp_dev.proc_unit) { allocate_Butcher_tableau(); } } int_rungekutta4::~int_rungekutta4() { if (PROC_UNIT_GPU == comp_dev.proc_unit) { deallocate_Butcher_tableau(); } } void int_rungekutta4::allocate_Butcher_tableau() { ALLOCATE_DEVICE_VECTOR((void**)&d_a, sizeof(a)); ALLOCATE_DEVICE_VECTOR((void**)&d_bh, sizeof(bh)); } void int_rungekutta4::deallocate_Butcher_tableau() { FREE_DEVICE_VECTOR((void**)&d_a); FREE_DEVICE_VECTOR((void**)&d_bh); } void int_rungekutta4::check_Butcher_tableau() { uint16_t n_c = sizeof(int_rungekutta4::c) / sizeof(var_t); uint16_t n_col = (sizeof(int_rungekutta4::a) / sizeof(var_t)) / n_c; for (uint16_t i = 0; i < n_c; i++) { var_t sum = 0.0; for (uint16_t j = 0; j < n_col; j++) { uint16_t k = i * n_col + j; sum += a[k]; } if (1.0e-15 < fabs(sum - c[i])) { throw std::string("The Runge-Kutta 4 is not consistent (sum(a_ij) != c_i.)"); } } } void int_rungekutta4::calc_ytemp(uint16_t stage) { if (PROC_UNIT_GPU == comp_dev.proc_unit) { var_t* coeff = d_a + stage * a_col; gpu_calc_lin_comb_s(ytemp, f.y, d_k, coeff, stage, f.n_var, comp_dev.id_dev, optimize); } else { var_t* coeff = h_a + stage * a_col; tools::calc_lin_comb_s(ytemp, f.y, h_k, coeff, stage, f.n_var); } } void int_rungekutta4::calc_y_np1() { if (PROC_UNIT_GPU == comp_dev.proc_unit) { var_t* coeff = d_bh; gpu_calc_lin_comb_s(f.yout, f.y, d_k, coeff, 4, f.n_var, comp_dev.id_dev, optimize); } else { var_t* coeff = h_bh; tools::calc_lin_comb_s(f.yout, f.y, h_k, coeff, 4, f.n_var); } } void int_rungekutta4::calc_error(uint32_t n) { if (PROC_UNIT_GPU == comp_dev.proc_unit) { gpu_calc_rk4_error(err, k[3], k[4], n, comp_dev.id_dev, optimize); } else { for (uint32_t i = 0; i < n; i++) { h_err[i] = fabs(h_k[3][i] - h_k[4][i]); } } } var_t int_rungekutta4::step() { static 
std::string err_msg1 = "The integrator could not provide the approximation of the solution with the specified tolerance."; static const uint16_t n_a = sizeof(int_rungekutta4::a) / sizeof(var_t); static const uint16_t n_bh = sizeof(int_rungekutta4::bh) / sizeof(var_t); static bool first_call = true; static uint32_t n_var = 0; if (n_var != f.n_var) { optimize = true; n_var = f.n_var; } else { optimize = false; } uint16_t stage = 0; t = f.t; //f.calc_dy(stage, t, f.y, k[0]); // -> k1 // The final function evaluation at the nth step is the same as the first at the (n+1)th step, // thus the effective number of function evaluations per step is 4. if (!adaptive) { // Calculate initial differentials and store them into k f.calc_dy(stage, t, f.y, k[0]); // -> k1 } else { if (first_call) { first_call = false; // Calculate initial differentials and store them into k f.calc_dy(stage, t, f.y, k[0]); // -> k1 } else { if (PROC_UNIT_GPU == comp_dev.proc_unit) { CUDA_SAFE_CALL(hipMemcpy(k[0], k[4], f.n_var*sizeof(var_t), hipMemcpyDeviceToDevice)); } else { memcpy(k[0], k[4], f.n_var*sizeof(var_t)); } } } var_t max_err = 0.0; uint16_t iter = 0; do { dt_did = dt_try; // Compute in advance the dt_try * coefficients to save n_var multiplication per stage for (uint16_t i = 0; i < n_a; i++) { h_a[i] = dt_try * a[i]; } for (uint16_t i = 0; i < n_bh; i++) { h_bh[i] = dt_try * bh[i]; } if (PROC_UNIT_GPU == comp_dev.proc_unit) { copy_vector_to_device(d_a, h_a, sizeof(h_a) ); copy_vector_to_device(d_bh, h_bh, sizeof(h_bh)); } for (stage = 1; stage < 4; stage++) // stage = 1, 2, 3 { t = f.t + c[stage] * dt_try; // -> tn + h2, tn + h/2, tn + h calc_ytemp(stage); // -> ytmp = yn + h/2*k1, ytmp = yn + h/2*k2, ytmp = yn + h*k3 f.calc_dy(stage, t, ytemp, k[stage]); // -> k2, k3, k4 } // We have stage (4) number of k vectors, approximate the solution in f.yout using the bh coeff: calc_y_np1(); // -> f.yout = y = ynp1 = yn + h/6*(k1 + 2*k2 + 2*k3 + k4) if (adaptive) { // Here stage = 4 t = f.t + c[stage] * dt_try; f.calc_dy(stage, t, f.yout, k[stage]); // -> k5 calc_error(f.n_var); max_err = get_max_error(f.n_var); max_err *= dt_try * lambda; calc_dt_try(max_err); } iter++; } while (adaptive && max_iter > iter && dt_min < dt_try && max_err > tolerance); if (max_iter <= iter) { throw std::string(err_msg1 + " The number of iteration exceeded the limit."); } if (dt_min > dt_try) { throw std::string(err_msg1 + " The stepsize is smaller than the limit."); } t = f.tout = f.t + dt_did; f.swap(); update_counters(iter); return dt_did; }
1a440e9caabf4bdd7406eb5abaa8772ea1d3f243.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "ode.h" #include "int_rungekutta4.h" #include "macro.h" #include "redutil2.h" using namespace redutil2; static const var_t lambda = 1.0/10.0; /* * Dormand, J. R.; Prince, P. J. * "New Runge-Kutta algorithms for numerical simulation in dynamical astronomy" * Celestial Mechanics, vol. 18, Oct. 1978, p. 223-232. * p. 225 Table I. Runge-Kutta 4(3)T */ // The Runge-Kutta matrix var_t int_rungekutta4::a[] = { 0.0, 0.0, 0.0, 0.0, // y = yn -> k1 1.0/2.0, 0.0, 0.0, 0.0, // y = ytmp = yn + h/2*k1 -> k2 0.0, 1.0/2.0, 0.0, 0.0, // y = ytmp = yn + h/2*k2 -> k3 0.0, 0.0, 1.0, 0.0, // y = ytmp = yn + h*k3 -> k4 /*---------------------------------------------------------------------------------------*/ 1.0/6.0, 1.0/3.0, 1.0/3.0, 1.0/6.0 // y = yn + h/6*(k1 + 2*k1 + 2*k3 + k4) -> k5 }; /* 5 x 4 matrix */ static uint16_t a_row = 5; static uint16_t a_col = 4; // weights var_t int_rungekutta4::bh[] = { 1.0/6.0, 1.0/3.0, 1.0/3.0, 1.0/6.0 }; // nodes var_t int_rungekutta4::c[] = { 0.0, 1.0/2.0, 1.0/2.0, 1.0, 1.0 }; // These arrays will contain the stepsize multiplied by the constants var_t int_rungekutta4::h_a[sizeof(int_rungekutta4::a) / sizeof(var_t)]; var_t int_rungekutta4::h_bh[sizeof(int_rungekutta4::bh) / sizeof(var_t)]; //__constant__ var_t dc_a[sizeof(int_rungekutta4::a) / sizeof(var_t)]; //__constant__ var_t dc_bh[sizeof(int_rungekutta4::bh) / sizeof(var_t)]; int_rungekutta4::int_rungekutta4(ode& f, bool adaptive, var_t tolerance, comp_dev_t comp_dev) : integrator(f, adaptive, tolerance, (adaptive ? 5 : 4), comp_dev) { name = "Runge-Kutta4"; n_order = 4; d_a = NULL; d_bh = NULL; check_Butcher_tableau(); if (PROC_UNIT_GPU == comp_dev.proc_unit) { allocate_Butcher_tableau(); } } int_rungekutta4::~int_rungekutta4() { if (PROC_UNIT_GPU == comp_dev.proc_unit) { deallocate_Butcher_tableau(); } } void int_rungekutta4::allocate_Butcher_tableau() { ALLOCATE_DEVICE_VECTOR((void**)&d_a, sizeof(a)); ALLOCATE_DEVICE_VECTOR((void**)&d_bh, sizeof(bh)); } void int_rungekutta4::deallocate_Butcher_tableau() { FREE_DEVICE_VECTOR((void**)&d_a); FREE_DEVICE_VECTOR((void**)&d_bh); } void int_rungekutta4::check_Butcher_tableau() { uint16_t n_c = sizeof(int_rungekutta4::c) / sizeof(var_t); uint16_t n_col = (sizeof(int_rungekutta4::a) / sizeof(var_t)) / n_c; for (uint16_t i = 0; i < n_c; i++) { var_t sum = 0.0; for (uint16_t j = 0; j < n_col; j++) { uint16_t k = i * n_col + j; sum += a[k]; } if (1.0e-15 < fabs(sum - c[i])) { throw std::string("The Runge-Kutta 4 is not consistent (sum(a_ij) != c_i.)"); } } } void int_rungekutta4::calc_ytemp(uint16_t stage) { if (PROC_UNIT_GPU == comp_dev.proc_unit) { var_t* coeff = d_a + stage * a_col; gpu_calc_lin_comb_s(ytemp, f.y, d_k, coeff, stage, f.n_var, comp_dev.id_dev, optimize); } else { var_t* coeff = h_a + stage * a_col; tools::calc_lin_comb_s(ytemp, f.y, h_k, coeff, stage, f.n_var); } } void int_rungekutta4::calc_y_np1() { if (PROC_UNIT_GPU == comp_dev.proc_unit) { var_t* coeff = d_bh; gpu_calc_lin_comb_s(f.yout, f.y, d_k, coeff, 4, f.n_var, comp_dev.id_dev, optimize); } else { var_t* coeff = h_bh; tools::calc_lin_comb_s(f.yout, f.y, h_k, coeff, 4, f.n_var); } } void int_rungekutta4::calc_error(uint32_t n) { if (PROC_UNIT_GPU == comp_dev.proc_unit) { gpu_calc_rk4_error(err, k[3], k[4], n, comp_dev.id_dev, optimize); } else { for (uint32_t i = 0; i < n; i++) { h_err[i] = fabs(h_k[3][i] - h_k[4][i]); } } } var_t int_rungekutta4::step() { static std::string err_msg1 = "The integrator could not provide the 
approximation of the solution with the specified tolerance."; static const uint16_t n_a = sizeof(int_rungekutta4::a) / sizeof(var_t); static const uint16_t n_bh = sizeof(int_rungekutta4::bh) / sizeof(var_t); static bool first_call = true; static uint32_t n_var = 0; if (n_var != f.n_var) { optimize = true; n_var = f.n_var; } else { optimize = false; } uint16_t stage = 0; t = f.t; //f.calc_dy(stage, t, f.y, k[0]); // -> k1 // The final function evaluation at the nth step is the same as the first at the (n+1)th step, // thus the effective number of function evaluations per step is 4. if (!adaptive) { // Calculate initial differentials and store them into k f.calc_dy(stage, t, f.y, k[0]); // -> k1 } else { if (first_call) { first_call = false; // Calculate initial differentials and store them into k f.calc_dy(stage, t, f.y, k[0]); // -> k1 } else { if (PROC_UNIT_GPU == comp_dev.proc_unit) { CUDA_SAFE_CALL(cudaMemcpy(k[0], k[4], f.n_var*sizeof(var_t), cudaMemcpyDeviceToDevice)); } else { memcpy(k[0], k[4], f.n_var*sizeof(var_t)); } } } var_t max_err = 0.0; uint16_t iter = 0; do { dt_did = dt_try; // Compute in advance the dt_try * coefficients to save n_var multiplication per stage for (uint16_t i = 0; i < n_a; i++) { h_a[i] = dt_try * a[i]; } for (uint16_t i = 0; i < n_bh; i++) { h_bh[i] = dt_try * bh[i]; } if (PROC_UNIT_GPU == comp_dev.proc_unit) { copy_vector_to_device(d_a, h_a, sizeof(h_a) ); copy_vector_to_device(d_bh, h_bh, sizeof(h_bh)); } for (stage = 1; stage < 4; stage++) // stage = 1, 2, 3 { t = f.t + c[stage] * dt_try; // -> tn + h2, tn + h/2, tn + h calc_ytemp(stage); // -> ytmp = yn + h/2*k1, ytmp = yn + h/2*k2, ytmp = yn + h*k3 f.calc_dy(stage, t, ytemp, k[stage]); // -> k2, k3, k4 } // We have stage (4) number of k vectors, approximate the solution in f.yout using the bh coeff: calc_y_np1(); // -> f.yout = y = ynp1 = yn + h/6*(k1 + 2*k2 + 2*k3 + k4) if (adaptive) { // Here stage = 4 t = f.t + c[stage] * dt_try; f.calc_dy(stage, t, f.yout, k[stage]); // -> k5 calc_error(f.n_var); max_err = get_max_error(f.n_var); max_err *= dt_try * lambda; calc_dt_try(max_err); } iter++; } while (adaptive && max_iter > iter && dt_min < dt_try && max_err > tolerance); if (max_iter <= iter) { throw std::string(err_msg1 + " The number of iteration exceeded the limit."); } if (dt_min > dt_try) { throw std::string(err_msg1 + " The stepsize is smaller than the limit."); } t = f.tout = f.t + dt_did; f.swap(); update_counters(iter); return dt_did; }
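For reference, the a[], bh[] and c[] tables above encode the classical fourth-order Runge-Kutta update. The small host-only sketch below is not part of the integrator class (the scalar right-hand side is just an example); it spells out one step for a scalar ODE, which is the same linear combination that calc_ytemp() and calc_y_np1() apply stage by stage to whole state vectors.

// One classical RK4 step for a scalar ODE y' = f(t, y), mirroring the Butcher tableau above.
#include <cstdio>
#include <cmath>

static double f(double t, double y) { return -y + std::sin(t); }   // example right-hand side

static double rk4_step(double t, double y, double h)
{
    // Stages follow the rows of a[], with nodes c = {0, 1/2, 1/2, 1}
    double k1 = f(t,           y);
    double k2 = f(t + 0.5 * h, y + 0.5 * h * k1);
    double k3 = f(t + 0.5 * h, y + 0.5 * h * k2);
    double k4 = f(t + h,       y + h * k3);
    // Weights bh = {1/6, 1/3, 1/3, 1/6}
    return y + h / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4);
}

int main()
{
    double t = 0.0, y = 1.0, h = 0.1;
    for (int i = 0; i < 10; i++) { y = rk4_step(t, y, h); t += h; }
    printf("y(%.1f) ~= %.6f\n", t, y);
    return 0;
}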
fe5b7ffb1b857d710c36d05f95415bec35ba9cb7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "reference.h" #include "../benchmark_common.h" //__constant__ float c_coeff[10]; #define BLOCK_DIMX 32 #define BLOCK_DIMY 8 #define RADIUS 8 //4 #define SMEM_DIMX (BLOCK_DIMX+2*RADIUS) extern "C" __global__ void stencil_3D_order8(float *g_output, float *g_input, float *g_coeff, const int dimx, const int dimy, const int dimz, float flag ) { __shared__ float s_data[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int ix = blockIdx.x*blockDim.x + threadIdx.x; int iy = blockIdx.y*blockDim.y + threadIdx.y; int in_idx = iy*dimx + ix; int out_idx = 0; int stride = dimx*dimy; float infront1, infront2, infront3, infront4; float behind1, behind2, behind3, behind4; float current; int tx = threadIdx.x + RADIUS; int ty = threadIdx.y + RADIUS; // fill the "in-front" and "behind" data behind3 = g_input[in_idx]; in_idx += stride; behind2 = g_input[in_idx]; in_idx += stride; behind1 = g_input[in_idx]; in_idx += stride; current = g_input[in_idx]; out_idx = in_idx; in_idx += stride; infront1 = g_input[in_idx]; in_idx += stride; infront2 = g_input[in_idx]; in_idx += stride; infront3 = g_input[in_idx]; in_idx += stride; infront4 = g_input[in_idx]; in_idx += stride; for(int i=RADIUS; i<dimz-RADIUS; i++) { ////////////////////////////////////////// // advance the slice (move the thread-front) behind4 = behind3; behind3 = behind2; behind2 = behind1; behind1 = current; current = infront1; infront1 = infront2; infront2 = infront3; infront3 = infront4; infront4 = g_input[in_idx]; in_idx += stride; out_idx += stride; __syncthreads(); ///////////////////////////////////////// // update the data slice in smem if(threadIdx.y<RADIUS) // halo above/below { s_data[threadIdx.y][tx] = g_input[out_idx-RADIUS*dimx]; s_data[threadIdx.y+BLOCK_DIMY+RADIUS][tx] = g_input[out_idx+BLOCK_DIMY*dimx]; } if(threadIdx.x<RADIUS) // halo left/right { s_data[ty][threadIdx.x] = g_input[out_idx-RADIUS]; s_data[ty][threadIdx.x+BLOCK_DIMX+RADIUS] = g_input[out_idx+BLOCK_DIMX]; } // update the slice in smem s_data[ty][tx] = current; __syncthreads(); ///////////////////////////////////////// // compute the output value float value = g_coeff[0] * current; value += g_coeff[1]*( infront1 + behind1 + s_data[ty-1][tx]+ s_data[ty+1][tx]+ s_data[ty][tx-1]+ s_data[ty][tx+1] ); value += g_coeff[2]*( infront2 + behind2 + s_data[ty-2][tx]+ s_data[ty+2][tx]+ s_data[ty][tx-2]+ s_data[ty][tx+2] ); value += g_coeff[3]*( infront3 + behind3 + s_data[ty-3][tx]+ s_data[ty+3][tx]+ s_data[ty][tx-3]+ s_data[ty][tx+3] ); value += g_coeff[4]*( infront4 + behind4 + s_data[ty-4][tx]+ s_data[ty+4][tx]+ s_data[ty][tx-4]+ s_data[ty][tx+4] ); g_output[out_idx] = value; } } void timing_experiment( void (*kernel)(float*, float*, float *, const int, const int, const int, float), float *d_output, float *d_input, float * d_coeff, int dimx, int dimy, int dimz, hipStream_t stream_app, pthread_mutex_t *mutexapp, bool flag, int smem=0, int nreps=1, float mult=1.f) { dim3 block(BLOCK_DIMX,BLOCK_DIMY); dim3 grid( dimx/block.x, dimy/block.y ); printf("(%d,%d)x(%d,%d) grid\n", grid.x,grid.y, block.x,block.y); for(int i=0; i<nreps; i++) { hipLaunchKernelGGL(( kernel), dim3(grid),dim3(block),smem, stream_app, d_output, d_input, d_coeff, dimx, dimy, dimz, 0.f ); pthread_mutex_unlock (mutexapp); if(flag) cutilSafeCall( hipStreamSynchronize(stream_app) ); pthread_mutex_lock (mutexapp); } pthread_mutex_unlock (mutexapp); } int pad; int dimx; int dimy; int dimz; int nreps; int nbytes; 
float *d_input = 0, *d_output=0; float *h_data = 0, *h_reference = 0; float h_coeff_symmetric[10] = {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f}; float * d_coeff; int main_threeDS(hipStream_t stream_app, pthread_mutex_t *mutexapp, bool flag) { pad = 0; dimx = 128*2; //640+pad; dimy = 64*2; //480; dimz = 64*2; //100; nreps = 1; nbytes = dimx*dimy*dimz*sizeof(float); hipMalloc( (void**)&d_input, nbytes); hipMalloc( (void**)&d_output, nbytes); printf("CUDA: %s\n", hipGetErrorString(hipGetLastError())); if( 0==d_input || 0==d_output ) { printf("couldn't allocate all GPU memory: %2.f MB\n", (2.f*nbytes)/(1024.f*1024.f)); exit(1); } printf("allocated %.1f MB on device\n", (2.f*nbytes)/(1024.f*1024.f)); h_data = (float*)malloc(nbytes); h_reference = (float*)malloc(nbytes); if( 0==h_data || 0==h_reference ) { printf("couldn't allocate CPU memory\n"); exit(1); } random_data( h_data, dimx,dimy,dimz, 1, 5 ); hipMemcpyAsync( d_input, h_data, nbytes, hipMemcpyHostToDevice, stream_app ); hipMemcpyAsync( d_output, h_data, nbytes, hipMemcpyHostToDevice, stream_app ); // setup coefficients hipMalloc( (void**)&d_coeff, 10*sizeof(float)); hipMemcpyAsync( d_coeff, h_coeff_symmetric, 10*sizeof(float), hipMemcpyHostToDevice, stream_app ); dim3 block(BLOCK_DIMX,BLOCK_DIMY); dim3 grid( dimx/block.x, dimy/block.y ); printf("(%d,%d)x(%d,%d) grid\n", grid.x,grid.y, block.x,block.y); printf("%20s ","FD_full_2D"); timing_experiment( stencil_3D_order8, d_output, d_input, d_coeff, dimx,dimy,dimz, stream_app, mutexapp ,flag, 0, nreps, (3.f*dimz-4*RADIUS)); if(d_input) hipFree(d_input); if(d_output) hipFree(d_output); return 0; }
fe5b7ffb1b857d710c36d05f95415bec35ba9cb7.cu
#include <stdio.h> #include "reference.h" #include "../benchmark_common.h" //__constant__ float c_coeff[10]; #define BLOCK_DIMX 32 #define BLOCK_DIMY 8 #define RADIUS 8 //4 #define SMEM_DIMX (BLOCK_DIMX+2*RADIUS) extern "C" __global__ void stencil_3D_order8(float *g_output, float *g_input, float *g_coeff, const int dimx, const int dimy, const int dimz, float flag ) { __shared__ float s_data[BLOCK_DIMY+2*RADIUS][BLOCK_DIMX+2*RADIUS]; int ix = blockIdx.x*blockDim.x + threadIdx.x; int iy = blockIdx.y*blockDim.y + threadIdx.y; int in_idx = iy*dimx + ix; int out_idx = 0; int stride = dimx*dimy; float infront1, infront2, infront3, infront4; float behind1, behind2, behind3, behind4; float current; int tx = threadIdx.x + RADIUS; int ty = threadIdx.y + RADIUS; // fill the "in-front" and "behind" data behind3 = g_input[in_idx]; in_idx += stride; behind2 = g_input[in_idx]; in_idx += stride; behind1 = g_input[in_idx]; in_idx += stride; current = g_input[in_idx]; out_idx = in_idx; in_idx += stride; infront1 = g_input[in_idx]; in_idx += stride; infront2 = g_input[in_idx]; in_idx += stride; infront3 = g_input[in_idx]; in_idx += stride; infront4 = g_input[in_idx]; in_idx += stride; for(int i=RADIUS; i<dimz-RADIUS; i++) { ////////////////////////////////////////// // advance the slice (move the thread-front) behind4 = behind3; behind3 = behind2; behind2 = behind1; behind1 = current; current = infront1; infront1 = infront2; infront2 = infront3; infront3 = infront4; infront4 = g_input[in_idx]; in_idx += stride; out_idx += stride; __syncthreads(); ///////////////////////////////////////// // update the data slice in smem if(threadIdx.y<RADIUS) // halo above/below { s_data[threadIdx.y][tx] = g_input[out_idx-RADIUS*dimx]; s_data[threadIdx.y+BLOCK_DIMY+RADIUS][tx] = g_input[out_idx+BLOCK_DIMY*dimx]; } if(threadIdx.x<RADIUS) // halo left/right { s_data[ty][threadIdx.x] = g_input[out_idx-RADIUS]; s_data[ty][threadIdx.x+BLOCK_DIMX+RADIUS] = g_input[out_idx+BLOCK_DIMX]; } // update the slice in smem s_data[ty][tx] = current; __syncthreads(); ///////////////////////////////////////// // compute the output value float value = g_coeff[0] * current; value += g_coeff[1]*( infront1 + behind1 + s_data[ty-1][tx]+ s_data[ty+1][tx]+ s_data[ty][tx-1]+ s_data[ty][tx+1] ); value += g_coeff[2]*( infront2 + behind2 + s_data[ty-2][tx]+ s_data[ty+2][tx]+ s_data[ty][tx-2]+ s_data[ty][tx+2] ); value += g_coeff[3]*( infront3 + behind3 + s_data[ty-3][tx]+ s_data[ty+3][tx]+ s_data[ty][tx-3]+ s_data[ty][tx+3] ); value += g_coeff[4]*( infront4 + behind4 + s_data[ty-4][tx]+ s_data[ty+4][tx]+ s_data[ty][tx-4]+ s_data[ty][tx+4] ); g_output[out_idx] = value; } } void timing_experiment( void (*kernel)(float*, float*, float *, const int, const int, const int, float), float *d_output, float *d_input, float * d_coeff, int dimx, int dimy, int dimz, cudaStream_t stream_app, pthread_mutex_t *mutexapp, bool flag, int smem=0, int nreps=1, float mult=1.f) { dim3 block(BLOCK_DIMX,BLOCK_DIMY); dim3 grid( dimx/block.x, dimy/block.y ); printf("(%d,%d)x(%d,%d) grid\n", grid.x,grid.y, block.x,block.y); for(int i=0; i<nreps; i++) { kernel<<<grid,block,smem, stream_app>>>( d_output, d_input, d_coeff, dimx, dimy, dimz, 0.f ); pthread_mutex_unlock (mutexapp); if(flag) cutilSafeCall( cudaStreamSynchronize(stream_app) ); pthread_mutex_lock (mutexapp); } pthread_mutex_unlock (mutexapp); } int pad; int dimx; int dimy; int dimz; int nreps; int nbytes; float *d_input = 0, *d_output=0; float *h_data = 0, *h_reference = 0; float h_coeff_symmetric[10] = {1.f, 1.f, 1.f, 
1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f}; float * d_coeff; int main_threeDS(cudaStream_t stream_app, pthread_mutex_t *mutexapp, bool flag) { pad = 0; dimx = 128*2; //640+pad; dimy = 64*2; //480; dimz = 64*2; //100; nreps = 1; nbytes = dimx*dimy*dimz*sizeof(float); cudaMalloc( (void**)&d_input, nbytes); cudaMalloc( (void**)&d_output, nbytes); printf("CUDA: %s\n", cudaGetErrorString(cudaGetLastError())); if( 0==d_input || 0==d_output ) { printf("couldn't allocate all GPU memory: %2.f MB\n", (2.f*nbytes)/(1024.f*1024.f)); exit(1); } printf("allocated %.1f MB on device\n", (2.f*nbytes)/(1024.f*1024.f)); h_data = (float*)malloc(nbytes); h_reference = (float*)malloc(nbytes); if( 0==h_data || 0==h_reference ) { printf("couldn't allocate CPU memory\n"); exit(1); } random_data( h_data, dimx,dimy,dimz, 1, 5 ); cudaMemcpyAsync( d_input, h_data, nbytes, cudaMemcpyHostToDevice, stream_app ); cudaMemcpyAsync( d_output, h_data, nbytes, cudaMemcpyHostToDevice, stream_app ); // setup coefficients cudaMalloc( (void**)&d_coeff, 10*sizeof(float)); cudaMemcpyAsync( d_coeff, h_coeff_symmetric, 10*sizeof(float), cudaMemcpyHostToDevice, stream_app ); dim3 block(BLOCK_DIMX,BLOCK_DIMY); dim3 grid( dimx/block.x, dimy/block.y ); printf("(%d,%d)x(%d,%d) grid\n", grid.x,grid.y, block.x,block.y); printf("%20s ","FD_full_2D"); timing_experiment( stencil_3D_order8, d_output, d_input, d_coeff, dimx,dimy,dimz, stream_app, mutexapp ,flag, 0, nreps, (3.f*dimz-4*RADIUS)); if(d_input) cudaFree(d_input); if(d_output) cudaFree(d_output); return 0; }
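The stencil kernel above combines a register queue along z with a shared-memory tile padded by RADIUS halo cells on each side, where threads with threadIdx below RADIUS load the halos. The reduced, self-contained 1D sketch below (assumed names and sizes, not part of the benchmark) shows only that halo-loading pattern followed by a symmetric stencil over the tile.

// 1D shared-memory halo sketch; blockDim.x must equal TILE for the indexing to hold.
#include <cstdio>
#include <cuda_runtime.h>

#define R 4
#define TILE 128

__global__ void stencil1d(const float* in, float* out, int n)
{
    __shared__ float s[TILE + 2 * R];
    int gid = blockIdx.x * blockDim.x + threadIdx.x;   // global index
    int lid = threadIdx.x + R;                         // local index inside the padded tile
    int g   = min(gid, n - 1);                         // clamp so every thread can help load

    s[lid] = in[g];
    if (threadIdx.x < R) {                             // halo left/right, clamped at the edges
        s[lid - R]    = in[max(g - R, 0)];
        s[lid + TILE] = in[min(g + TILE, n - 1)];
    }
    __syncthreads();

    if (gid < n) {
        float v = s[lid];                              // trivial symmetric stencil, unit coefficients
        for (int r = 1; r <= R; r++) v += s[lid - r] + s[lid + r];
        out[gid] = v;
    }
}

int main()
{
    const int n = 1 << 20;
    float *d_in, *d_out;
    cudaMalloc(&d_in,  n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemset(d_in, 0, n * sizeof(float));
    stencil1d<<<(n + TILE - 1) / TILE, TILE>>>(d_in, d_out, n);
    cudaDeviceSynchronize();
    printf("done: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}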
333a0e56456649d69293633ec3befb0a58579a91.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <opencv2/opencv.hpp> #include <iostream> __global__ void convertToGray(uchar3 *color_pixel, unsigned char* gray_pixel) { int idx = blockIdx.x * blockDim.x + threadIdx.x; gray_pixel[idx] = (unsigned char)(0.299f * (float)color_pixel[idx].x + 0.587f * (float)color_pixel[idx].y + 0.114f * (float)color_pixel[idx].z); } int main(int argc, char** argv) { // read image cv::Mat input_img = cv::imread("sample.jpg", 1); if (input_img.empty() == true) { std::cerr << "Error : cannot find input image" << std::endl; return -1; } // image size int width = input_img.cols; int height = input_img.rows; std::cout << "Image size: " << width << "x" << height << std::endl; // host array uchar3* host_img_array_color = new uchar3[width * height]; unsigned char* host_img_array_gray = new unsigned char [width * height]; // to 1 array for (int y = 0; y < height; y++) { for (int x = 0; x < width; x++) { host_img_array_color[x + y * width] = make_uchar3(input_img.at<cv::Vec3b>(y, x)[2], input_img.at<cv::Vec3b>(y, x)[1], input_img.at<cv::Vec3b>(y, x)[0]); } } // GPU memory uchar3* device_img_array_color; unsigned char* device_img_array_gray; int datasize_color = sizeof(uchar3) * width * height; int datasize_gray = sizeof(unsigned char) * width * height; hipMalloc((void**)&device_img_array_color, datasize_color); hipMalloc((void**)&device_img_array_gray, datasize_gray); // CPU to GPU hipMemcpy(device_img_array_color, host_img_array_color, datasize_color, hipMemcpyHostToDevice); // GPU convertToGray << <width * height, 1 >> > (device_img_array_color, device_img_array_gray); // GPU to CPU hipMemcpy(host_img_array_gray, device_img_array_gray, datasize_gray, hipMemcpyDeviceToHost); // Results cv::Mat1b output_img(height, width); for (int y = 0; y < height; y++) { for (int x = 0; x < width; x++) { output_img.at<unsigned char>(y, x) = host_img_array_gray[x + y * width]; } } cv::imwrite("test_gray.jpg", output_img); hipFree(device_img_array_color); hipFree(device_img_array_gray); delete[] host_img_array_color; delete[] host_img_array_gray; return 0; }
333a0e56456649d69293633ec3befb0a58579a91.cu
#include <cuda_runtime.h> #include <device_launch_parameters.h> #include <opencv2/opencv.hpp> #include <iostream> __global__ void convertToGray(uchar3 *color_pixel, unsigned char* gray_pixel) { int idx = blockIdx.x * blockDim.x + threadIdx.x; gray_pixel[idx] = (unsigned char)(0.299f * (float)color_pixel[idx].x + 0.587f * (float)color_pixel[idx].y + 0.114f * (float)color_pixel[idx].z); } int main(int argc, char** argv) { // read image cv::Mat input_img = cv::imread("sample.jpg", 1); if (input_img.empty() == true) { std::cerr << "Error : cannot find input image" << std::endl; return -1; } // image size int width = input_img.cols; int height = input_img.rows; std::cout << "Image size: " << width << "x" << height << std::endl; // host array uchar3* host_img_array_color = new uchar3[width * height]; unsigned char* host_img_array_gray = new unsigned char [width * height]; // to 1 array for (int y = 0; y < height; y++) { for (int x = 0; x < width; x++) { host_img_array_color[x + y * width] = make_uchar3(input_img.at<cv::Vec3b>(y, x)[2], input_img.at<cv::Vec3b>(y, x)[1], input_img.at<cv::Vec3b>(y, x)[0]); } } // GPU memory uchar3* device_img_array_color; unsigned char* device_img_array_gray; int datasize_color = sizeof(uchar3) * width * height; int datasize_gray = sizeof(unsigned char) * width * height; cudaMalloc((void**)&device_img_array_color, datasize_color); cudaMalloc((void**)&device_img_array_gray, datasize_gray); // CPU to GPU cudaMemcpy(device_img_array_color, host_img_array_color, datasize_color, cudaMemcpyHostToDevice); // GPU convertToGray << <width * height, 1 >> > (device_img_array_color, device_img_array_gray); // GPU to CPU cudaMemcpy(host_img_array_gray, device_img_array_gray, datasize_gray, cudaMemcpyDeviceToHost); // Results cv::Mat1b output_img(height, width); for (int y = 0; y < height; y++) { for (int x = 0; x < width; x++) { output_img.at<unsigned char>(y, x) = host_img_array_gray[x + y * width]; } } cv::imwrite("test_gray.jpg", output_img); cudaFree(device_img_array_color); cudaFree(device_img_array_gray); delete[] host_img_array_color; delete[] host_img_array_gray; return 0; }
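The program above launches width*height blocks of a single thread each, which works but leaves most of every warp idle. A more conventional configuration (sketch only, with an assumed kernel name) uses a fixed block size plus a bounds check so the grid may overshoot the pixel count:

// Same per-pixel work, but with many threads per block and an index guard.
#include <cuda_runtime.h>

__global__ void convertToGrayGuarded(const uchar3* color, unsigned char* gray, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {                        // guard: the last block may run past n
        uchar3 p = color[idx];
        gray[idx] = (unsigned char)(0.299f * p.x + 0.587f * p.y + 0.114f * p.z);
    }
}

// Usage sketch: 256 threads per block, enough blocks to cover every pixel.
// convertToGrayGuarded<<<(n + 255) / 256, 256>>>(device_img_array_color, device_img_array_gray, n);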
c4026fbd4efac729f869819b5bd09845aa93f8bb.hip
// !!! This is a file automatically generated by hipify!!! // test convolution using specific formula for Gauss kernel // compile with // nvcc -I.. -DCUDA_BLOCK_SIZE=192 -DMAXTHREADSPERBLOCK0=1024 -DSHAREDMEMPERBLOCK0=49152 -Wno-deprecated-gpu-targets -D__TYPE__=float -std=c++14 -O2 -o build/test_specific test_specific.cu // we compare a generic implementation of the Gauss kernel vs the specific // #include <stdio.h> #include <assert.h> #include <vector> #include <ctime> #include <algorithm> #include <iostream> #include <hip/hip_runtime.h> #include <keops_includes.h> using namespace keops; __TYPE__ floatrand() { return ((__TYPE__) std::rand())/RAND_MAX-.5; // random value between -.5 and .5 } template < class V > void fillrandom(V& v) { generate(v.begin(), v.end(), floatrand); // fills vector with random values } __TYPE__ floatone() { return ((__TYPE__) 1.0); } template < class V > void fillones(V& v) { generate(v.begin(), v.end(), floatone); // fills vector with ones } int main() { // symbolic variables of the function using X = Var<1,3,0>; // X is the first variable and represents a 3D vector using Y = Var<2,3,1>; // Y is the second variable and represents a 3D vector using B = Var<3,3,1>; // B is the third variable and represents a 3D vector using C = Param<0,1>; // C is the first extra parameter // symbolic expression of the function : Gauss kernel using F = GaussKernel<C,X,Y,B>; std::cout << std::endl << "Function F : generic Gauss kernel :" << std::endl; std::cout << PrintFormula<F>(); std::cout << std::endl << std::endl; using SF = GaussKernel_specific<C,X,Y,B>; std::cout << "Function SF = specific Gauss kernel :" << std::endl; std::cout << PrintFormula<SF>(); using FUNCONVF = Sum_Reduction<F>; using FUNCONVSF = Sum_Reduction<SF>; // now we test ------------------------------------------------------------------------------ std::cout << std::endl << std::endl << "Testing F" << std::endl; int Nx=5000, Ny=5000; std::vector<__TYPE__> vf(Nx*F::DIM); fillrandom(vf); __TYPE__ *f = vf.data(); std::vector<__TYPE__> vx(Nx*X::DIM); fillrandom(vx); __TYPE__ *x = vx.data(); std::vector<__TYPE__> vy(Ny*Y::DIM); fillrandom(vy); __TYPE__ *y = vy.data(); std::vector<__TYPE__> vb(Ny*B::DIM); fillrandom(vb); __TYPE__ *b = vb.data(); std::vector<__TYPE__> resgpu1(Nx*F::DIM), resgpu2(Nx*F::DIM); __TYPE__ params[1]; __TYPE__ Sigma = 1; params[0] = 1.0/(Sigma*Sigma); Eval<FUNCONVF,GpuConv1D_FromHost>::Run(Nx, Ny, f, params, x, y, b); // first dummy call to Gpu Eval<FUNCONVF,GpuConv1D_FromHost>::Run(Nx, Ny, f, params, x, y, b); // second dummy call to Gpu clock_t begin, end; begin = clock(); Eval<FUNCONVF,GpuConv1D_FromHost>::Run(Nx, Ny, f, params, x, y, b); end = clock(); std::cout << "time for GPU computation : " << double(end - begin) / CLOCKS_PER_SEC << std::endl; resgpu1 = vf; /// testing SF std::cout << std::endl << std::endl << "Testing SF" << std::endl; begin = clock(); Eval<FUNCONVSF,GpuConv1D_FromHost>::Run(Nx, Ny, f, params, x, y, b); end = clock(); std::cout << "time for GPU computation : " << double(end - begin) / CLOCKS_PER_SEC << std::endl; resgpu2 = vf; // display values std::cout << "resgpu1 = "; for(int i=0; i<5; i++) std::cout << resgpu1[i] << " "; std::cout << std::endl << "resgpu2 = "; for(int i=0; i<5; i++) std::cout << resgpu2[i] << " "; // display mean of errors __TYPE__ s = 0; for(int i=0; i<Nx*F::DIM; i++) s += std::abs(resgpu1[i]-resgpu2[i]); std::cout << std::endl << "mean abs error =" << s/Nx << std::endl; // gradient with respect to X 
--------------------------------------------------------------- using Eta = Var<4,F::DIM,0>; // new variable is in seventh position and is input of gradient using FUNCONVGX = Grad<FUNCONVF,X,Eta>; using FUNCONVSGX = Grad<FUNCONVSF,X,Eta>; std::vector<__TYPE__> ve(Nx*Eta::DIM); fillrandom(ve); __TYPE__ *e = ve.data(); std::cout << "testing gradient wrt X of F" << std::endl; begin = clock(); Eval<FUNCONVGX,GpuConv1D_FromHost>::Run(Nx, Ny, f, params, x, y, b, e); end = clock(); std::cout << "time for CPU computation : " << double(end - begin) / CLOCKS_PER_SEC << std::endl; resgpu1 = vf; std::cout << "testing gradient wrt X of SF" << std::endl; begin = clock(); Eval<FUNCONVSGX,GpuConv1D_FromHost>::Run(Nx, Ny, f, params, x, y, b, e); end = clock(); std::cout << "time for CPU computation : " << double(end - begin) / CLOCKS_PER_SEC << std::endl; resgpu2 = vf; // display values std::cout << "resgpu1 = "; for(int i=0; i<5; i++) std::cout << resgpu1[i] << " "; std::cout << std::endl << "resgpu2 = "; for(int i=0; i<5; i++) std::cout << resgpu2[i] << " "; // display mean of errors s = 0; for(int i=0; i<Nx*F::DIM; i++) s += std::abs(resgpu1[i]-resgpu2[i]); std::cout << std::endl << "mean abs error =" << s/Nx << std::endl; }
c4026fbd4efac729f869819b5bd09845aa93f8bb.cu
// test convolution using specific formula for Gauss kernel // compile with // nvcc -I.. -DCUDA_BLOCK_SIZE=192 -DMAXTHREADSPERBLOCK0=1024 -DSHAREDMEMPERBLOCK0=49152 -Wno-deprecated-gpu-targets -D__TYPE__=float -std=c++14 -O2 -o build/test_specific test_specific.cu // we compare a generic implementation of the Gauss kernel vs the specific // #include <stdio.h> #include <assert.h> #include <vector> #include <ctime> #include <algorithm> #include <iostream> #include <cuda.h> #include <keops_includes.h> using namespace keops; __TYPE__ floatrand() { return ((__TYPE__) std::rand())/RAND_MAX-.5; // random value between -.5 and .5 } template < class V > void fillrandom(V& v) { generate(v.begin(), v.end(), floatrand); // fills vector with random values } __TYPE__ floatone() { return ((__TYPE__) 1.0); } template < class V > void fillones(V& v) { generate(v.begin(), v.end(), floatone); // fills vector with ones } int main() { // symbolic variables of the function using X = Var<1,3,0>; // X is the first variable and represents a 3D vector using Y = Var<2,3,1>; // Y is the second variable and represents a 3D vector using B = Var<3,3,1>; // B is the third variable and represents a 3D vector using C = Param<0,1>; // C is the first extra parameter // symbolic expression of the function : Gauss kernel using F = GaussKernel<C,X,Y,B>; std::cout << std::endl << "Function F : generic Gauss kernel :" << std::endl; std::cout << PrintFormula<F>(); std::cout << std::endl << std::endl; using SF = GaussKernel_specific<C,X,Y,B>; std::cout << "Function SF = specific Gauss kernel :" << std::endl; std::cout << PrintFormula<SF>(); using FUNCONVF = Sum_Reduction<F>; using FUNCONVSF = Sum_Reduction<SF>; // now we test ------------------------------------------------------------------------------ std::cout << std::endl << std::endl << "Testing F" << std::endl; int Nx=5000, Ny=5000; std::vector<__TYPE__> vf(Nx*F::DIM); fillrandom(vf); __TYPE__ *f = vf.data(); std::vector<__TYPE__> vx(Nx*X::DIM); fillrandom(vx); __TYPE__ *x = vx.data(); std::vector<__TYPE__> vy(Ny*Y::DIM); fillrandom(vy); __TYPE__ *y = vy.data(); std::vector<__TYPE__> vb(Ny*B::DIM); fillrandom(vb); __TYPE__ *b = vb.data(); std::vector<__TYPE__> resgpu1(Nx*F::DIM), resgpu2(Nx*F::DIM); __TYPE__ params[1]; __TYPE__ Sigma = 1; params[0] = 1.0/(Sigma*Sigma); Eval<FUNCONVF,GpuConv1D_FromHost>::Run(Nx, Ny, f, params, x, y, b); // first dummy call to Gpu Eval<FUNCONVF,GpuConv1D_FromHost>::Run(Nx, Ny, f, params, x, y, b); // second dummy call to Gpu clock_t begin, end; begin = clock(); Eval<FUNCONVF,GpuConv1D_FromHost>::Run(Nx, Ny, f, params, x, y, b); end = clock(); std::cout << "time for GPU computation : " << double(end - begin) / CLOCKS_PER_SEC << std::endl; resgpu1 = vf; /// testing SF std::cout << std::endl << std::endl << "Testing SF" << std::endl; begin = clock(); Eval<FUNCONVSF,GpuConv1D_FromHost>::Run(Nx, Ny, f, params, x, y, b); end = clock(); std::cout << "time for GPU computation : " << double(end - begin) / CLOCKS_PER_SEC << std::endl; resgpu2 = vf; // display values std::cout << "resgpu1 = "; for(int i=0; i<5; i++) std::cout << resgpu1[i] << " "; std::cout << std::endl << "resgpu2 = "; for(int i=0; i<5; i++) std::cout << resgpu2[i] << " "; // display mean of errors __TYPE__ s = 0; for(int i=0; i<Nx*F::DIM; i++) s += std::abs(resgpu1[i]-resgpu2[i]); std::cout << std::endl << "mean abs error =" << s/Nx << std::endl; // gradient with respect to X --------------------------------------------------------------- using Eta = Var<4,F::DIM,0>; // new variable is 
in seventh position and is input of gradient using FUNCONVGX = Grad<FUNCONVF,X,Eta>; using FUNCONVSGX = Grad<FUNCONVSF,X,Eta>; std::vector<__TYPE__> ve(Nx*Eta::DIM); fillrandom(ve); __TYPE__ *e = ve.data(); std::cout << "testing gradient wrt X of F" << std::endl; begin = clock(); Eval<FUNCONVGX,GpuConv1D_FromHost>::Run(Nx, Ny, f, params, x, y, b, e); end = clock(); std::cout << "time for CPU computation : " << double(end - begin) / CLOCKS_PER_SEC << std::endl; resgpu1 = vf; std::cout << "testing gradient wrt X of SF" << std::endl; begin = clock(); Eval<FUNCONVSGX,GpuConv1D_FromHost>::Run(Nx, Ny, f, params, x, y, b, e); end = clock(); std::cout << "time for CPU computation : " << double(end - begin) / CLOCKS_PER_SEC << std::endl; resgpu2 = vf; // display values std::cout << "resgpu1 = "; for(int i=0; i<5; i++) std::cout << resgpu1[i] << " "; std::cout << std::endl << "resgpu2 = "; for(int i=0; i<5; i++) std::cout << resgpu2[i] << " "; // display mean of errors s = 0; for(int i=0; i<Nx*F::DIM; i++) s += std::abs(resgpu1[i]-resgpu2[i]); std::cout << std::endl << "mean abs error =" << s/Nx << std::endl; }
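Assuming the usual definition of the Gauss kernel, both reductions timed above evaluate, for every x_i, the sum over j of exp(-c * |x_i - y_j|^2) * b_j with c = 1/Sigma^2. A plain CPU reference of that sum is sketched below; the function name and the flat float layout are assumptions, and it is far too slow for the 5000x5000 problem, but it can be handy for spot-checking a few entries of resgpu1/resgpu2.

// Naive CPU reference of the Gaussian convolution, O(nx*ny), for spot checks only.
#include <cstdio>
#include <cmath>
#include <vector>

void gauss_conv_cpu(const std::vector<float>& x, const std::vector<float>& y,
                    const std::vector<float>& b, std::vector<float>& f,
                    int nx, int ny, float c /* = 1 / sigma^2 */)
{
    for (int i = 0; i < nx; i++) {
        float acc[3] = {0.f, 0.f, 0.f};
        for (int j = 0; j < ny; j++) {
            float r2 = 0.f;                               // squared distance |x_i - y_j|^2
            for (int d = 0; d < 3; d++) {
                float diff = x[3 * i + d] - y[3 * j + d];
                r2 += diff * diff;
            }
            float w = std::exp(-c * r2);                  // Gaussian weight
            for (int d = 0; d < 3; d++) acc[d] += w * b[3 * j + d];
        }
        for (int d = 0; d < 3; d++) f[3 * i + d] = acc[d];
    }
}

int main()
{
    int nx = 2, ny = 3;
    std::vector<float> x(3 * nx, 0.5f), y(3 * ny, -0.25f), b(3 * ny, 1.0f), f(3 * nx, 0.0f);
    gauss_conv_cpu(x, y, b, f, nx, ny, 1.0f);
    printf("f[0] = %f\n", f[0]);
    return 0;
}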
500af5215095a2a5fb74b5d1e978e740f62773c6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*** * File Name: sobel_edge.cu * Description: This Program Performs Sobel edge detection operations on a .bmp, once by a * serial algorithm, and once by a massively parallel CUDA algorithm. */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <stdio.h> #include <time.h> //C version of Code to be specified as extern, //because CUDA uses g++ version of compiler. extern "C" { #include "read_bmp.h" } //Define white,black and threshold values. #define PIXEL_BLACK 0 #define PIXEL_WHITE 255 #define PERCENT_BLACK_THRESHOLD 0.75 //Define the thread hierarchy being used. #define CUDA_GRIDS 10 #define CUDA_BLOCKS_PER_GRID 32 #define CUDA_THREADS_PER_BLOCK 1024 #define NS_PER_SEC 1000000000 //Returns the pixel value when we pass row,coulmn value #define getPixelValue(row, col, dim) \ (((row) * (dim)) + (col)) //Values required for timer calculation. static struct timespec serialStart; static struct timespec serialEnd; static struct timespec parallelStart; static struct timespec parallelEnd; /* * Function to Display all the required information: matrix and CUDA parameters. * * @param inputFile -- name of the input image * @param serialOutputFile -- name of the serial output image * @param parallelOutputFile -- name of the parallel output image * @param imageHeight -- Height of the image in pixels * @param imageWidth -- Width of the image in pixels */ void DisplayParameters(char *inputFile, char *serialOutputFile, char *cudaOutputFile, int imageHeight,int imageWidth) { printf("********************************************************************************\n"); printf("Serial and CUDA Sobel edge detection.\n"); printf("\n"); printf("Input image: %s \t(Height is: %d pixels, Width is : %d pixels)\n", inputFile, imageHeight, imageWidth); printf("Serial output image is: \t%s\n", serialOutputFile); printf("CUDA output image is: \t%s\n", cudaOutputFile); printf("\n"); printf("CUDA computation specifications are:\n"); printf("Grids = %d grids\n", CUDA_GRIDS); printf("Blocks= %d blocks\n", CUDA_BLOCKS_PER_GRID); printf("tpb= %d threads per block\n", CUDA_THREADS_PER_BLOCK); printf("\n"); printf("*********************************************************************************\n"); } /* * Function to display information of threshold,timing and convergence results onto the screen. 
* * @param serialConvergenceThreshold * @param serialConvergenceThreshold */ void DisplayResults(int serialConvergenceThreshold, int parallelConvergenceThreshold) { printf("*******************************************************************************\n"); printf("Time taken for serial Sobel edge detection: %lf\n", (getPixelValue(serialEnd.tv_sec, serialEnd.tv_nsec, NS_PER_SEC) - getPixelValue(serialStart.tv_sec, serialStart.tv_nsec, NS_PER_SEC)) / ((double)NS_PER_SEC)); printf("Convergence Threshold: %d\n", serialConvergenceThreshold); printf("\n"); printf("Time taken for CUDA Parallel Sobel edge detection: %lf\n", (getPixelValue(parallelEnd.tv_sec, parallelEnd.tv_nsec, NS_PER_SEC) - getPixelValue(parallelStart.tv_sec, parallelStart.tv_nsec, NS_PER_SEC)) / ((double)NS_PER_SEC)); printf("Convergence Threshold: %d\n", parallelConvergenceThreshold); printf("********************************************************************************\n"); } /* * Serial algorithm to perform Sobel edge detection on an input pixel * image which is at different brightness thresholds until a certain percentage of * pixels in the output pixel buffer are black(75% in our case). * * @param input -- input pixel buffer * @param output -- output pixel buffer * @param height -- height of pixel image * @param width -- width of pixel image * @return -- gradient threshold at which PERCENT_BLACK_THRESHOLD(75%) of pixels are black */ int SerialSobelEdgeDetection(uint8_t *input, uint8_t *output, int height, int width) { //printf("height=%d, width =%d\n",height,width); int i=0,j=0; int gradientThreshold=0, blackPixelCount = 0 , boundary = height * width * 3 / 4; int whitePixelCount =0; //printf("value 75% = %d",boundary); // Loop until blackpixel counts are less than boundary(75% black cells) while(blackPixelCount < boundary) { gradientThreshold = gradientThreshold +1; //printf("%d -- blackpix\n",blackPixelCount); //printf("%d -- gradthresh\n",gradientThreshold); blackPixelCount = 0; for(i=1;i<(height-1);i++) { for(j=1; j<(width-1);j++) { double Gx = (1*input[getPixelValue(i - 1, j + 1, width)]) + (-1 * input[getPixelValue(i-1, j - 1, width)]) + (2 * input[getPixelValue(i, j + 1, width)]) + (-2 * input[getPixelValue(i, j - 1, width)]) + (1 * input[getPixelValue(i + 1, j + 1, width)]) + (-1 * input[getPixelValue(i + 1, j - 1, width)]); double Gy = (1* input[getPixelValue(i - 1, j - 1, width)]) + (2 * input[getPixelValue(i - 1, j, width)]) + (1 * input[getPixelValue(i - 1, j + 1, width)]) + (-1 * input[getPixelValue(i + 1, j - 1, width)]) + (-2 * input[getPixelValue(i + 1, j, width)]) + (-1 * input[getPixelValue(i + 1, j + 1, width)]); //Instead of squareroot, square threshold and compare directly with magnitude value if(((Gx * Gx) + (Gy * Gy)) > (gradientThreshold * gradientThreshold)) { //set the output value to white output[getPixelValue(i,j,width)] = PIXEL_WHITE; whitePixelCount++; } else { //set the output value to black output[getPixelValue(i,j,width)] = PIXEL_BLACK; blackPixelCount++; } } } //printf("white=%d",whitePixelCount); //printf("blck=%d",blackPixelCount); } //printf("%d -- gradthresh\n",gradientThreshold); return gradientThreshold; } /* * This is Parallel CUDA kernel function that performs a Sobel edge detection * on a group of pixels. This kernel function is called from host's function. 
* */ __global__ void CudaSobelEdgeDetection(uint8_t *input, uint8_t *output, int height, int width, int gradientThreshold) { int row = 0; for(int i = 0; row < (height - 1); i++) { // Let the blockIdx increment beyond its dimension for cyclic distribution of the test pixels int blockRow = (i * gridDim.x) + blockIdx.x; // Calculate the row/col in the image buffer that this thread is on row = (getPixelValue(blockRow, threadIdx.x, blockDim.x) / (width - 2)) + 1; int col = (getPixelValue(blockRow, threadIdx.x, blockDim.x) % (width - 2)) + 1; // Calculate Sobel magnitude of gradient directly, instead of using Sobel_Magnitude utility double Gx = // ( * input[getPixelValue(row - 1, col - 1, width)]) + ( +1 * input[getPixelValue(row - 1, col + 1, width)]) + ( -1 * input[getPixelValue(row - 1, col - 1, width)]) + ( +2 * input[getPixelValue(row, col + 1, width)]) + ( -2 * input[getPixelValue(row, col - 1, width)]) + ( +1 * input[getPixelValue(row + 1, col + 1, width)]) + ( -1 * input[getPixelValue(row + 1, col - 1, width)]); double Gy = ( +1 * input[getPixelValue(row - 1, col - 1, width)]) + ( +2 * input[getPixelValue(row - 1, col, width)]) + ( +1 * input[getPixelValue(row - 1, col + 1, width)]) + ( -1 * input[getPixelValue(row + 1, col - 1, width)]) + ( -2 * input[getPixelValue(row + 1, col, width)]) + ( -1 * input[getPixelValue(row + 1, col + 1, width)]); if(((Gx * Gx) + (Gy * Gy)) > (gradientThreshold * gradientThreshold)) { //set the output value to white. output[getPixelValue(row, col, width)] = PIXEL_WHITE; } else { //set the output pixel value to black. output[getPixelValue(row, col, width)] = PIXEL_BLACK; } } } /* * Parallel algorithm to perform a Sobel edge detection on an input pixel * image at different brightness thresholds until a certain percentage of * pixels in the output pixel buffer are black. * * @param input -- input pixel buffer * @param output -- output pixel buffer * @param height -- height of pixel image * @param width -- width of pixel image * @return -- gradient threshold at which PERCENT_BLACK_THRESHOLD(75%) of pixels are black */ __host__ int ParallelSobelEdgeDetection(uint8_t *input, uint8_t *output, int height, int width) { int numBlocks = CUDA_BLOCKS_PER_GRID; int threadsPerBlock = CUDA_THREADS_PER_BLOCK; size_t imageMemSize = height * width * sizeof(uint8_t); uint8_t *deviceInputImage, *deviceOutputImage; // Allocate device memory hipMalloc((void **)&deviceInputImage, imageMemSize); hipMalloc((void **)&deviceOutputImage, imageMemSize); // Copy host input image to device hipMemcpy(deviceInputImage, input, imageMemSize, hipMemcpyHostToDevice); //define the device data-structures dim3 dimGrid(numBlocks); dim3 dimBlock(threadsPerBlock); //Perform Parallel Cuda Sobel edge detetction by calling the kernel. int gradientThreshold, blackPixelCount = 0; for(gradientThreshold = 0; blackPixelCount < (height * width * 75 / 100); gradientThreshold++) { // Launching the Kernel hipLaunchKernelGGL(( CudaSobelEdgeDetection), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceInputImage, deviceOutputImage, height, width, gradientThreshold); // Copy the device results array back to host hipMemcpy(output, deviceOutputImage, imageMemSize, hipMemcpyDeviceToHost); // Count the number of black pixels blackPixelCount = 0; for(int row = 1; row < (height - 1); row++) { for(int col = 1; col < (width - 1); col++) { if(output[getPixelValue(row, col, width)] == PIXEL_BLACK) { blackPixelCount++; } } } } //Due to for loop increment, we need to decrement value and return it. 
return gradientThreshold-1; } /* * Main function. */ int main(int argc, char* argv[]) { // Check for correct number of comand line args if (argc != 4) { printf("Error:Incorrect arguments: <input_img.bmp> <serial_output_img.bmp> <cuda_output_img.bmp> Please try again..\n"); return 0; } // Open the files specified by the command line args FILE *inputFile = fopen(argv[1], "rb"); FILE *serialOutputFile = fopen(argv[2], "wb"); FILE *cudaOutputFile = fopen(argv[3], "wb"); //check if the file is valid and can be opened. if(inputFile == NULL) { printf("Error: %s file could not be opened for reading.", argv[1]); } // Read in input image and allocate space for new output image buffers uint8_t *inputImage = (uint8_t *)read_bmp_file(inputFile); uint8_t *serialOutputImage = (uint8_t *)malloc(get_num_pixel()); uint8_t *cudaOutputImage = (uint8_t *)malloc(get_num_pixel()); // Display GPU related information by calling this function. DisplayParameters(argv[1], argv[2], argv[3], get_image_height(), get_image_width()); // Call the serial function for serial sobel edge detection. printf("Performing serial Sobel edge detection.\n"); clock_gettime(CLOCK_REALTIME, &serialStart); int serialConvergenceThreshold = SerialSobelEdgeDetection(inputImage, serialOutputImage, get_image_height(), get_image_width()); clock_gettime(CLOCK_REALTIME, &serialEnd); // Call the CUDA function for Parallel sobel edge detection printf("Performing CUDA parallel Sobel edge detection.\n"); clock_gettime(CLOCK_REALTIME, &parallelStart); int parallelConvergenceThreshold = ParallelSobelEdgeDetection(inputImage, cudaOutputImage, get_image_height(), get_image_width()); clock_gettime(CLOCK_REALTIME, &parallelEnd); //DisplayResults for parallel and serial ConvergenceThreshold. DisplayResults(serialConvergenceThreshold,parallelConvergenceThreshold); // Write output image buffers. Closes files and frees buffers. write_bmp_file(serialOutputFile, serialOutputImage); write_bmp_file(cudaOutputFile, cudaOutputImage); return 0; }
500af5215095a2a5fb74b5d1e978e740f62773c6.cu
/*** * File Name: sobel_edge.cu * Description: This Program Performs Sobel edge detection operations on a .bmp, once by a * serial algorithm, and once by a massively parallel CUDA algorithm. */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <stdio.h> #include <time.h> //C version of Code to be specified as extern, //because CUDA uses g++ version of compiler. extern "C" { #include "read_bmp.h" } //Define white,black and threshold values. #define PIXEL_BLACK 0 #define PIXEL_WHITE 255 #define PERCENT_BLACK_THRESHOLD 0.75 //Define the thread hierarchy being used. #define CUDA_GRIDS 10 #define CUDA_BLOCKS_PER_GRID 32 #define CUDA_THREADS_PER_BLOCK 1024 #define NS_PER_SEC 1000000000 //Returns the pixel value when we pass row,coulmn value #define getPixelValue(row, col, dim) \ (((row) * (dim)) + (col)) //Values required for timer calculation. static struct timespec serialStart; static struct timespec serialEnd; static struct timespec parallelStart; static struct timespec parallelEnd; /* * Function to Display all the required information: matrix and CUDA parameters. * * @param inputFile -- name of the input image * @param serialOutputFile -- name of the serial output image * @param parallelOutputFile -- name of the parallel output image * @param imageHeight -- Height of the image in pixels * @param imageWidth -- Width of the image in pixels */ void DisplayParameters(char *inputFile, char *serialOutputFile, char *cudaOutputFile, int imageHeight,int imageWidth) { printf("********************************************************************************\n"); printf("Serial and CUDA Sobel edge detection.\n"); printf("\n"); printf("Input image: %s \t(Height is: %d pixels, Width is : %d pixels)\n", inputFile, imageHeight, imageWidth); printf("Serial output image is: \t%s\n", serialOutputFile); printf("CUDA output image is: \t%s\n", cudaOutputFile); printf("\n"); printf("CUDA computation specifications are:\n"); printf("Grids = %d grids\n", CUDA_GRIDS); printf("Blocks= %d blocks\n", CUDA_BLOCKS_PER_GRID); printf("tpb= %d threads per block\n", CUDA_THREADS_PER_BLOCK); printf("\n"); printf("*********************************************************************************\n"); } /* * Function to display information of threshold,timing and convergence results onto the screen. * * @param serialConvergenceThreshold * @param serialConvergenceThreshold */ void DisplayResults(int serialConvergenceThreshold, int parallelConvergenceThreshold) { printf("*******************************************************************************\n"); printf("Time taken for serial Sobel edge detection: %lf\n", (getPixelValue(serialEnd.tv_sec, serialEnd.tv_nsec, NS_PER_SEC) - getPixelValue(serialStart.tv_sec, serialStart.tv_nsec, NS_PER_SEC)) / ((double)NS_PER_SEC)); printf("Convergence Threshold: %d\n", serialConvergenceThreshold); printf("\n"); printf("Time taken for CUDA Parallel Sobel edge detection: %lf\n", (getPixelValue(parallelEnd.tv_sec, parallelEnd.tv_nsec, NS_PER_SEC) - getPixelValue(parallelStart.tv_sec, parallelStart.tv_nsec, NS_PER_SEC)) / ((double)NS_PER_SEC)); printf("Convergence Threshold: %d\n", parallelConvergenceThreshold); printf("********************************************************************************\n"); } /* * Serial algorithm to perform Sobel edge detection on an input pixel * image which is at different brightness thresholds until a certain percentage of * pixels in the output pixel buffer are black(75% in our case). 
* * @param input -- input pixel buffer * @param output -- output pixel buffer * @param height -- height of pixel image * @param width -- width of pixel image * @return -- gradient threshold at which PERCENT_BLACK_THRESHOLD(75%) of pixels are black */ int SerialSobelEdgeDetection(uint8_t *input, uint8_t *output, int height, int width) { //printf("height=%d, width =%d\n",height,width); int i=0,j=0; int gradientThreshold=0, blackPixelCount = 0 , boundary = height * width * 3 / 4; int whitePixelCount =0; //printf("value 75% = %d",boundary); // Loop until blackpixel counts are less than boundary(75% black cells) while(blackPixelCount < boundary) { gradientThreshold = gradientThreshold +1; //printf("%d -- blackpix\n",blackPixelCount); //printf("%d -- gradthresh\n",gradientThreshold); blackPixelCount = 0; for(i=1;i<(height-1);i++) { for(j=1; j<(width-1);j++) { double Gx = (1*input[getPixelValue(i - 1, j + 1, width)]) + (-1 * input[getPixelValue(i-1, j - 1, width)]) + (2 * input[getPixelValue(i, j + 1, width)]) + (-2 * input[getPixelValue(i, j - 1, width)]) + (1 * input[getPixelValue(i + 1, j + 1, width)]) + (-1 * input[getPixelValue(i + 1, j - 1, width)]); double Gy = (1* input[getPixelValue(i - 1, j - 1, width)]) + (2 * input[getPixelValue(i - 1, j, width)]) + (1 * input[getPixelValue(i - 1, j + 1, width)]) + (-1 * input[getPixelValue(i + 1, j - 1, width)]) + (-2 * input[getPixelValue(i + 1, j, width)]) + (-1 * input[getPixelValue(i + 1, j + 1, width)]); //Instead of squareroot, square threshold and compare directly with magnitude value if(((Gx * Gx) + (Gy * Gy)) > (gradientThreshold * gradientThreshold)) { //set the output value to white output[getPixelValue(i,j,width)] = PIXEL_WHITE; whitePixelCount++; } else { //set the output value to black output[getPixelValue(i,j,width)] = PIXEL_BLACK; blackPixelCount++; } } } //printf("white=%d",whitePixelCount); //printf("blck=%d",blackPixelCount); } //printf("%d -- gradthresh\n",gradientThreshold); return gradientThreshold; } /* * This is Parallel CUDA kernel function that performs a Sobel edge detection * on a group of pixels. This kernel function is called from host's function. 
* */ __global__ void CudaSobelEdgeDetection(uint8_t *input, uint8_t *output, int height, int width, int gradientThreshold) { int row = 0; for(int i = 0; row < (height - 1); i++) { // Let the blockIdx increment beyond its dimension for cyclic distribution of the test pixels int blockRow = (i * gridDim.x) + blockIdx.x; // Calculate the row/col in the image buffer that this thread is on row = (getPixelValue(blockRow, threadIdx.x, blockDim.x) / (width - 2)) + 1; int col = (getPixelValue(blockRow, threadIdx.x, blockDim.x) % (width - 2)) + 1; // Calculate Sobel magnitude of gradient directly, instead of using Sobel_Magnitude utility double Gx = // ( * input[getPixelValue(row - 1, col - 1, width)]) + ( +1 * input[getPixelValue(row - 1, col + 1, width)]) + ( -1 * input[getPixelValue(row - 1, col - 1, width)]) + ( +2 * input[getPixelValue(row, col + 1, width)]) + ( -2 * input[getPixelValue(row, col - 1, width)]) + ( +1 * input[getPixelValue(row + 1, col + 1, width)]) + ( -1 * input[getPixelValue(row + 1, col - 1, width)]); double Gy = ( +1 * input[getPixelValue(row - 1, col - 1, width)]) + ( +2 * input[getPixelValue(row - 1, col, width)]) + ( +1 * input[getPixelValue(row - 1, col + 1, width)]) + ( -1 * input[getPixelValue(row + 1, col - 1, width)]) + ( -2 * input[getPixelValue(row + 1, col, width)]) + ( -1 * input[getPixelValue(row + 1, col + 1, width)]); if(((Gx * Gx) + (Gy * Gy)) > (gradientThreshold * gradientThreshold)) { //set the output value to white. output[getPixelValue(row, col, width)] = PIXEL_WHITE; } else { //set the output pixel value to black. output[getPixelValue(row, col, width)] = PIXEL_BLACK; } } } /* * Parallel algorithm to perform a Sobel edge detection on an input pixel * image at different brightness thresholds until a certain percentage of * pixels in the output pixel buffer are black. * * @param input -- input pixel buffer * @param output -- output pixel buffer * @param height -- height of pixel image * @param width -- width of pixel image * @return -- gradient threshold at which PERCENT_BLACK_THRESHOLD(75%) of pixels are black */ __host__ int ParallelSobelEdgeDetection(uint8_t *input, uint8_t *output, int height, int width) { int numBlocks = CUDA_BLOCKS_PER_GRID; int threadsPerBlock = CUDA_THREADS_PER_BLOCK; size_t imageMemSize = height * width * sizeof(uint8_t); uint8_t *deviceInputImage, *deviceOutputImage; // Allocate device memory cudaMalloc((void **)&deviceInputImage, imageMemSize); cudaMalloc((void **)&deviceOutputImage, imageMemSize); // Copy host input image to device cudaMemcpy(deviceInputImage, input, imageMemSize, cudaMemcpyHostToDevice); //define the device data-structures dim3 dimGrid(numBlocks); dim3 dimBlock(threadsPerBlock); //Perform Parallel Cuda Sobel edge detetction by calling the kernel. int gradientThreshold, blackPixelCount = 0; for(gradientThreshold = 0; blackPixelCount < (height * width * 75 / 100); gradientThreshold++) { // Launching the Kernel CudaSobelEdgeDetection<<<dimGrid, dimBlock>>>(deviceInputImage, deviceOutputImage, height, width, gradientThreshold); // Copy the device results array back to host cudaMemcpy(output, deviceOutputImage, imageMemSize, cudaMemcpyDeviceToHost); // Count the number of black pixels blackPixelCount = 0; for(int row = 1; row < (height - 1); row++) { for(int col = 1; col < (width - 1); col++) { if(output[getPixelValue(row, col, width)] == PIXEL_BLACK) { blackPixelCount++; } } } } //Due to for loop increment, we need to decrement value and return it. return gradientThreshold-1; } /* * Main function. 
*/
int main(int argc, char* argv[]) {
    // Check for the correct number of command line args
    if (argc != 4) {
        printf("Error: Incorrect arguments: <input_img.bmp> <serial_output_img.bmp> <cuda_output_img.bmp>. Please try again.\n");
        return 1;
    }

    // Open the files specified by the command line args
    FILE *inputFile = fopen(argv[1], "rb");
    FILE *serialOutputFile = fopen(argv[2], "wb");
    FILE *cudaOutputFile = fopen(argv[3], "wb");

    // Check that the input file is valid and could be opened.
    if (inputFile == NULL) {
        printf("Error: %s file could not be opened for reading.\n", argv[1]);
        return 1;
    }

    // Read in the input image and allocate space for the new output image buffers
    uint8_t *inputImage = (uint8_t *)read_bmp_file(inputFile);
    uint8_t *serialOutputImage = (uint8_t *)malloc(get_num_pixel());
    uint8_t *cudaOutputImage = (uint8_t *)malloc(get_num_pixel());

    // Display GPU related information by calling this function.
    DisplayParameters(argv[1], argv[2], argv[3], get_image_height(), get_image_width());

    // Call the serial function for serial Sobel edge detection.
    printf("Performing serial Sobel edge detection.\n");
    clock_gettime(CLOCK_REALTIME, &serialStart);
    int serialConvergenceThreshold = SerialSobelEdgeDetection(inputImage, serialOutputImage,
                                                              get_image_height(), get_image_width());
    clock_gettime(CLOCK_REALTIME, &serialEnd);

    // Call the CUDA function for parallel Sobel edge detection.
    printf("Performing CUDA parallel Sobel edge detection.\n");
    clock_gettime(CLOCK_REALTIME, &parallelStart);
    int parallelConvergenceThreshold = ParallelSobelEdgeDetection(inputImage, cudaOutputImage,
                                                                  get_image_height(), get_image_width());
    clock_gettime(CLOCK_REALTIME, &parallelEnd);

    // Display the serial and parallel convergence thresholds.
    DisplayResults(serialConvergenceThreshold, parallelConvergenceThreshold);

    // Write output image buffers. write_bmp_file() closes the files and frees the buffers.
    write_bmp_file(serialOutputFile, serialOutputImage);
    write_bmp_file(cudaOutputFile, cudaOutputImage);

    return 0;
}
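A design note on ParallelSobelEdgeDetection above: every iteration of its threshold loop copies the full output image back to the host only to count black pixels. A minimal sketch of keeping that count on the device instead is shown below; the kernel, its name, and the device-side counter are hypothetical additions for illustration, not part of the original program.

#include <stdint.h>

// Hypothetical helper (not in the original file): counts pixels equal to blackValue
// directly on the device, so the host would only need to copy back a single integer
// per threshold iteration instead of the whole image.
__global__ void CountBlackPixelsKernel(const uint8_t *output, int totalPixels,
                                       uint8_t blackValue, unsigned int *blackCount)
{
    // Grid-stride loop so any launch geometry covers the whole image.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < totalPixels;
         i += gridDim.x * blockDim.x) {
        if (output[i] == blackValue) {
            atomicAdd(blackCount, 1u);
        }
    }
}

// Sketch of how it could be used inside the threshold loop (deviceBlackCount is a
// hypothetical device-side unsigned int; PIXEL_BLACK is the constant already used above):
//   cudaMemset(deviceBlackCount, 0, sizeof(unsigned int));
//   CountBlackPixelsKernel<<<numBlocks, threadsPerBlock>>>(deviceOutputImage,
//           height * width, PIXEL_BLACK, deviceBlackCount);
//   cudaMemcpy(&blackPixelCount, deviceBlackCount, sizeof(unsigned int),
//              cudaMemcpyDeviceToHost);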
a9c8135691884101b3b9a16065b176fbba335863.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cctype>
#include <cassert>
#include <cstdio>
#include <ctime>

#define DATA_SIZE 1048576
#define THREAD_NUM 256

#ifndef nullptr
#define nullptr 0
#endif

using namespace std;

//////////////////////// kernel that runs on the device ////////////////////////
__global__ static void Kernel_SquareSum( int* pIn, size_t* pDataSize,
                                         int* pOut, clock_t* pElapsed )
{
    const size_t computeSize = *pDataSize / THREAD_NUM;
    const size_t tID = size_t( threadIdx.x );

    // start timing
    clock_t startTime;
    if ( tID == 0 ) startTime = clock( );   // let one arbitrary thread do the timing

    // hipMalloc() does not zero device memory, so accumulate in a register first
    int sum = 0;
    for ( size_t i = tID * computeSize; i < ( tID + 1 ) * computeSize; ++i ) {
        sum += pIn[i] * pIn[i];
    }
    pOut[tID] = sum;

    if ( tID == 0 ) *pElapsed = clock( ) - startTime;   // stop timing, hand the value back to the host
}

bool CUDA_SquareSum( int* pOut, clock_t* pElapsed,
                     int* pIn, size_t dataSize )
{
    assert( pIn != nullptr );
    assert( pOut != nullptr );

    int* pDevIn = nullptr;
    int* pDevOut = nullptr;
    size_t* pDevDataSize = nullptr;
    clock_t* pDevElasped = nullptr;

    // 1. select the device
    hipError_t cudaStatus = hipSetDevice( 0 );  // succeeds as long as a supported GPU is installed
    if ( cudaStatus != hipSuccess ) {
        fprintf( stderr, "Call to hipSetDevice() failed!" );
        return false;
    }

    switch ( true ) {
    default:
        // 2. allocate device memory
        cudaStatus = hipMalloc( (void**)&pDevIn, dataSize * sizeof( int ) );
        if ( cudaStatus != hipSuccess ) {
            fprintf( stderr, "hipMalloc() failed while allocating the input array on the device!" );
            break;
        }
        cudaStatus = hipMalloc( (void**)&pDevOut, THREAD_NUM * sizeof( int ) );
        if ( cudaStatus != hipSuccess ) {
            fprintf( stderr, "hipMalloc() failed while allocating the result array on the device!" );
            break;
        }
        cudaStatus = hipMalloc( (void**)&pDevDataSize, sizeof( size_t ) );
        if ( cudaStatus != hipSuccess ) {
            fprintf( stderr, "hipMalloc() failed while allocating the data-size variable on the device!" );
            break;
        }
        cudaStatus = hipMalloc( (void**)&pDevElasped, sizeof( clock_t ) );
        if ( cudaStatus != hipSuccess ) {
            fprintf( stderr, "hipMalloc() failed while allocating the elapsed-time variable on the device!" );
            break;
        }

        // 3. copy host data to device memory
        cudaStatus = hipMemcpy( pDevIn, pIn, dataSize * sizeof( int ), hipMemcpyHostToDevice );
        if ( cudaStatus != hipSuccess ) {
            fprintf( stderr, "hipMemcpy() failed while copying the host data array to the device!" );
            break;
        }
        cudaStatus = hipMemcpy( pDevDataSize, &dataSize, sizeof( size_t ), hipMemcpyHostToDevice );
        if ( cudaStatus != hipSuccess ) {
            fprintf( stderr, "hipMemcpy() failed while copying the data size to the device!" );
            break;
        }

        // 4. launch the kernel; the host waits for the device to finish
        hipLaunchKernelGGL(( Kernel_SquareSum), dim3(1), dim3(THREAD_NUM), 0, 0,
                           pDevIn, pDevDataSize, pDevOut, pDevElasped );

        // 5. check whether the kernel launch reported an error
        cudaStatus = hipGetLastError( );
        if ( cudaStatus != hipSuccess ) {
            fprintf( stderr, "Kernel launch failed!" );
            break;
        }

        // 6. synchronize with the kernel and wait for completion
        cudaStatus = hipDeviceSynchronize( );
        if ( cudaStatus != hipSuccess ) {
            fprintf( stderr, "An error occurred while synchronizing with the kernel!" );
            break;
        }

        // 7. fetch the results
        cudaStatus = hipMemcpy( pOut, pDevOut, THREAD_NUM * sizeof( int ), hipMemcpyDeviceToHost );
        if ( cudaStatus != hipSuccess ) {
            fprintf( stderr, "Failed to copy the result data from the device back to the host!" );
            break;
        }
        cudaStatus = hipMemcpy( pElapsed, pDevElasped, sizeof( clock_t ), hipMemcpyDeviceToHost );
        if ( cudaStatus != hipSuccess ) {
            fprintf( stderr, "Failed to copy the elapsed-time value from the device back to the host!" );
            break;
        }

        hipFree( pDevIn );
        hipFree( pDevOut );
        hipFree( pDevDataSize );
        hipFree( pDevElasped );
        return true;
    }

    hipFree( pDevIn );
    hipFree( pDevOut );
    hipFree( pDevDataSize );
    hipFree( pDevElasped );
    return false;
}

void GenerateData( int* pData, size_t dataSize )    // generate the test data
{
    assert( pData != nullptr );
    for ( size_t i = 0; i < dataSize; i++ ) {
        srand( i + 3 );
        pData[i] = rand( ) % 100;
    }
}

int main( int argc, char** argv )   // program entry point
{
    int* pData = nullptr;
    int* pResult = nullptr;
    clock_t* pElapsed = nullptr;

    // allocate the host-side buffers with the pinned-memory allocator
    hipError_t cudaStatus = hipHostMalloc( &pData, DATA_SIZE * sizeof( int ) );
    if ( cudaStatus != hipSuccess ) {
        fprintf( stderr, "Failed to allocate resources on the host!" );
        return 1;
    }
    cudaStatus = hipHostMalloc( &pResult, THREAD_NUM * sizeof( int ) );
    if ( cudaStatus != hipSuccess ) {
        fprintf( stderr, "Failed to allocate resources on the host!" );
        return 1;
    }
    cudaStatus = hipHostMalloc( &pElapsed, sizeof( clock_t ) );
    if ( cudaStatus != hipSuccess ) {
        fprintf( stderr, "Failed to allocate resources on the host!" );
        return 1;
    }

    GenerateData( pData, DATA_SIZE );                       // generate random data
    CUDA_SquareSum( pResult, pElapsed, pData, DATA_SIZE );  // compute the sum of squares

    // combine the partial results on the CPU
    int totalResult = 0;
    for ( int i = 0; i < THREAD_NUM; ++i ) {
        totalResult += pResult[i];
    }

    // check for overflow
    const char* pOverFlow = "";
    if ( totalResult < 0 ) pOverFlow = " (overflow)";

    // print the benchmark results
    printf( "Sum of squares computed with CUDA: %d%s\nElapsed GPU clocks: %d\n",
            totalResult, pOverFlow, (int)*pElapsed );

    hipDeviceProp_t prop;
    if ( hipGetDeviceProperties( &prop, 0 ) == hipSuccess ) {
        clock_t actualTime = *pElapsed / clock_t( prop.clockRate );
        printf( "Actual execution time: %d ms\n", (int)actualTime );
        printf( "Bandwidth: %.2f MB/s\n",
                float( DATA_SIZE * sizeof( int ) >> 20 ) * 1000.0f / float( actualTime ) );
        printf( "GPU device: %s\n", prop.name );
    }

    hipHostFree( pData );
    hipHostFree( pResult );
    hipHostFree( pElapsed );
    return 0;
}
a9c8135691884101b3b9a16065b176fbba335863.cu
#include <cuda_runtime.h>
#include <cctype>
#include <cassert>
#include <cstdio>
#include <ctime>

#define DATA_SIZE 1048576
#define THREAD_NUM 256

#ifndef nullptr
#define nullptr 0
#endif

using namespace std;

//////////////////////// kernel that runs on the device ////////////////////////
__global__ static void Kernel_SquareSum( int* pIn, size_t* pDataSize,
                                         int* pOut, clock_t* pElapsed )
{
    const size_t computeSize = *pDataSize / THREAD_NUM;
    const size_t tID = size_t( threadIdx.x );

    // start timing
    clock_t startTime;
    if ( tID == 0 ) startTime = clock( );   // let one arbitrary thread do the timing

    // cudaMalloc() does not zero device memory, so accumulate in a register first
    int sum = 0;
    for ( size_t i = tID * computeSize; i < ( tID + 1 ) * computeSize; ++i ) {
        sum += pIn[i] * pIn[i];
    }
    pOut[tID] = sum;

    if ( tID == 0 ) *pElapsed = clock( ) - startTime;   // stop timing, hand the value back to the host
}

bool CUDA_SquareSum( int* pOut, clock_t* pElapsed,
                     int* pIn, size_t dataSize )
{
    assert( pIn != nullptr );
    assert( pOut != nullptr );

    int* pDevIn = nullptr;
    int* pDevOut = nullptr;
    size_t* pDevDataSize = nullptr;
    clock_t* pDevElasped = nullptr;

    // 1. select the device
    cudaError_t cudaStatus = cudaSetDevice( 0 );    // succeeds as long as an NVIDIA GPU is installed
    if ( cudaStatus != cudaSuccess ) {
        fprintf( stderr, "Call to cudaSetDevice() failed!" );
        return false;
    }

    switch ( true ) {
    default:
        // 2. allocate device memory
        cudaStatus = cudaMalloc( (void**)&pDevIn, dataSize * sizeof( int ) );
        if ( cudaStatus != cudaSuccess ) {
            fprintf( stderr, "cudaMalloc() failed while allocating the input array on the device!" );
            break;
        }
        cudaStatus = cudaMalloc( (void**)&pDevOut, THREAD_NUM * sizeof( int ) );
        if ( cudaStatus != cudaSuccess ) {
            fprintf( stderr, "cudaMalloc() failed while allocating the result array on the device!" );
            break;
        }
        cudaStatus = cudaMalloc( (void**)&pDevDataSize, sizeof( size_t ) );
        if ( cudaStatus != cudaSuccess ) {
            fprintf( stderr, "cudaMalloc() failed while allocating the data-size variable on the device!" );
            break;
        }
        cudaStatus = cudaMalloc( (void**)&pDevElasped, sizeof( clock_t ) );
        if ( cudaStatus != cudaSuccess ) {
            fprintf( stderr, "cudaMalloc() failed while allocating the elapsed-time variable on the device!" );
            break;
        }

        // 3. copy host data to device memory
        cudaStatus = cudaMemcpy( pDevIn, pIn, dataSize * sizeof( int ), cudaMemcpyHostToDevice );
        if ( cudaStatus != cudaSuccess ) {
            fprintf( stderr, "cudaMemcpy() failed while copying the host data array to the device!" );
            break;
        }
        cudaStatus = cudaMemcpy( pDevDataSize, &dataSize, sizeof( size_t ), cudaMemcpyHostToDevice );
        if ( cudaStatus != cudaSuccess ) {
            fprintf( stderr, "cudaMemcpy() failed while copying the data size to the device!" );
            break;
        }

        // 4. launch the kernel; the host waits for the device to finish
        Kernel_SquareSum<<<1, THREAD_NUM>>>( pDevIn, pDevDataSize, pDevOut, pDevElasped );

        // 5. check whether the kernel launch reported an error
        cudaStatus = cudaGetLastError( );
        if ( cudaStatus != cudaSuccess ) {
            fprintf( stderr, "Kernel launch failed!" );
            break;
        }

        // 6. synchronize with the kernel and wait for completion
        cudaStatus = cudaDeviceSynchronize( );
        if ( cudaStatus != cudaSuccess ) {
            fprintf( stderr, "An error occurred while synchronizing with the kernel!" );
            break;
        }

        // 7. fetch the results
        cudaStatus = cudaMemcpy( pOut, pDevOut, THREAD_NUM * sizeof( int ), cudaMemcpyDeviceToHost );
        if ( cudaStatus != cudaSuccess ) {
            fprintf( stderr, "Failed to copy the result data from the device back to the host!" );
            break;
        }
        cudaStatus = cudaMemcpy( pElapsed, pDevElasped, sizeof( clock_t ), cudaMemcpyDeviceToHost );
        if ( cudaStatus != cudaSuccess ) {
            fprintf( stderr, "Failed to copy the elapsed-time value from the device back to the host!" );
            break;
        }

        cudaFree( pDevIn );
        cudaFree( pDevOut );
        cudaFree( pDevDataSize );
        cudaFree( pDevElasped );
        return true;
    }

    cudaFree( pDevIn );
    cudaFree( pDevOut );
    cudaFree( pDevDataSize );
    cudaFree( pDevElasped );
    return false;
}

void GenerateData( int* pData, size_t dataSize )    // generate the test data
{
    assert( pData != nullptr );
    for ( size_t i = 0; i < dataSize; i++ ) {
        srand( i + 3 );
        pData[i] = rand( ) % 100;
    }
}

int main( int argc, char** argv )   // program entry point
{
    int* pData = nullptr;
    int* pResult = nullptr;
    clock_t* pElapsed = nullptr;

    // allocate the host-side buffers with the CUDA pinned-memory allocator
    cudaError_t cudaStatus = cudaMallocHost( &pData, DATA_SIZE * sizeof( int ) );
    if ( cudaStatus != cudaSuccess ) {
        fprintf( stderr, "Failed to allocate resources on the host!" );
        return 1;
    }
    cudaStatus = cudaMallocHost( &pResult, THREAD_NUM * sizeof( int ) );
    if ( cudaStatus != cudaSuccess ) {
        fprintf( stderr, "Failed to allocate resources on the host!" );
        return 1;
    }
    cudaStatus = cudaMallocHost( &pElapsed, sizeof( clock_t ) );
    if ( cudaStatus != cudaSuccess ) {
        fprintf( stderr, "Failed to allocate resources on the host!" );
        return 1;
    }

    GenerateData( pData, DATA_SIZE );                       // generate random data
    CUDA_SquareSum( pResult, pElapsed, pData, DATA_SIZE );  // compute the sum of squares

    // combine the partial results on the CPU
    int totalResult = 0;
    for ( int i = 0; i < THREAD_NUM; ++i ) {
        totalResult += pResult[i];
    }

    // check for overflow
    const char* pOverFlow = "";
    if ( totalResult < 0 ) pOverFlow = " (overflow)";

    // print the benchmark results
    printf( "Sum of squares computed with CUDA: %d%s\nElapsed GPU clocks: %d\n",
            totalResult, pOverFlow, (int)*pElapsed );

    cudaDeviceProp prop;
    if ( cudaGetDeviceProperties( &prop, 0 ) == cudaSuccess ) {
        clock_t actualTime = *pElapsed / clock_t( prop.clockRate );
        printf( "Actual execution time: %d ms\n", (int)actualTime );
        printf( "Bandwidth: %.2f MB/s\n",
                float( DATA_SIZE * sizeof( int ) >> 20 ) * 1000.0f / float( actualTime ) );
        printf( "GPU device: %s\n", prop.name );
    }

    cudaFreeHost( pData );
    cudaFreeHost( pResult );
    cudaFreeHost( pElapsed );
    return 0;
}
a3653bd8ffa0a5c2be6628105ea13ba7b429dda7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //new /***************** EXAMPLE *********************** ArrayVals: 9, 31, 4, 18 padded ArrayVals: 09, 31, 04, 18 create histogram of size 10 for buckets 0-9 which each element initialized to 0. Use a thread on each element of ArrayVals and increment the value in the bucket it belongs to. This will count how many values that belong in each bucket. In the above example the histogram values would look like this: HISTOGRAM: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 BUCKET -------------------------------------- 2 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 VALUES COUNTER next use an array to count the OFFSET and a copy of that OFFSET array. This is done by taking the element value at each index of the histogram and adding it to the value at the previous index. OFFSET Original: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 -------------------------------------- 2 | 3 | 3 | 4 | 4 | 4 | 4 | 4 | 4 | 4 OFFSET CHANGED IS JUST A COPY OF OFFSET ORIGINAL. OFFSET Changed: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 -------------------------------------- 2 | 3 | 3 | 4 | 4 | 4 | 4 | 4 | 4 | 4 ^ ^ ^ | | | | | taken from 4th index in histogram plus previous (1+3) | | | taken from second index plus the first index (1+2) | taken from the first index in histogram (2) The reason we create a copy is because later, when we want to determine how to rearange the elements, we have to decrement the values in OFFSET so they don't overwrite each other but we must also remember the original OFFSET values. This will become clearer later. As you can see the numbers that repeat occur (like index 2 and 4-9) when its corresponding index in the histogram equals 0 so the value doesn't increase. Now we need to iterate over ArrayVals again and look at the OFFSET changed array index it corresponds with to determine where it goes in the list. We'll create a second temporary list so that we don't ruin the order of the elements in the original ArrayVals. This can be done in parallel so we can use a thread to look at each element of ArrayVals at once. secondList[ArrayValsSize]; we will, for example, look at the first element in ArrayVals. Its left most digit is 0 so we will look at index 0 in the OFFSET changed array. We notice it has a value 2 so we can place this number at the 2nd index of the secondList array we just created. This would be index 1 because arrays start at 0. So whatever number fills the OFFSET changed index we subtract 1 to determine the position to insert into the secondList. After we input into the secondList we want to decrement the value in OFFSET changed so that the next number that checks can be placed in an empty spot and not overwrite the numbers in the same bucket. This means index 0 of the OFFSET changed array goes from 2 to 1. We do the same thing for the other three elements in ArrayVals. 31's first digit is a 3 so look at index 3 in OFFSET changed and we see that it gets placed at 4-1=3 index in the secondList. Remember to decrement the value at OFFSET changed[3] which = 4 so it becomes 3. continue this with the next value which is 04 which means we look at OFFSET changed[0], because its left most digit is 0, which has a value of 1 because the value 2 was decremented when 09 was placed in secondList above in line 75-78. Because the value is now 1 that means we insert 04 into index 1-1=0 of secondList. We finish with value 18. OFFSET changed[1] (because its left most bit is 1) has a value of 3 so we put 18 into secondList[2] because 3-1 = 2. 
After every element has been properly inserted into secondList, it should now look like this: secondList: 04, 09, 18, 31 We can see that its sorted but the computer doensn't know that. In order to be sure its sorted we iterate through the histogram and check to see if each value is at most 1. So if any value in histogram is greater than 1 then we can't be sure its sorted because we don't know which threads finished first. So next if we find a value in histogram that is greater than 1 we look to that index but in the original OFFSET. So histogram[0] has a value of 2 which means we look in the original OFFSET[0] to get the value 2. This means we are working from the ranges of 0-2 in the secondList. so we create histogram and OFFSET again. To do this we just use a recursion and basically repeate the process above but now only working with elements 0 and 1 based on the range provided. We want to do the same process as above but on the next digit to the right. so we sort 04 and 09 by counting them into the histogram and finding the OFFSET just like above in lines 15-30. They will each look like this: HISTOGRAM: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 -------------------------------------- 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 OFFSET: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 -------------------------------------- 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 2 We iterate over histogram and see if any values are greater than 1. There are none so they must all be sorted! so we iterate over histogram and when we get to a value that is non 0 we can point to secondList and overwrite those numbers with the current numbers and they will be in the correct order. histogram[4] is the first element with a non 0 value. We were given ranges 0-2 from above (see lines 103-106) so we start at 0 and point to secondList[0] and insert 4. Then we continue our iteration over histogram and get to 9 as the next non 0 element. We can point to secondList[1] to insert 9. We are done with this part so it will return to the previous step which is line 102 where it will continuing iterating over its histogram looking for values greater than 1. Refer to the histogram displayed on line 23 as displayed here: HISTOGRAM: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 BUCKET -------------------------------------- 2 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 VALUES COUNTER We branched off initially from histogram[0] because it had a value greater than 1 but now we are back and can continue. The rest of the elemnts contain either a 0 or 1 so don't need to be sorted anymore. This means secondList contains the sorted array. All that is left is to use threads for each element of secondList and copy their value into the original array ArrayVals because ArrayVals is the one that was sent from the CPU that needs to go back to the CPU. The array is sorted and we are done! 
**************************************************/ //new #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> // #define MAX 2147483647; #define MAX 99; unsigned int * valuesList; unsigned int totalNumbers; void printArray(int * array, int size) { printf("[ "); for (int i = 0; i < size; i++) { printf("%d ", array[i]); } printf("]\n"); } void printArrayU(unsigned int * array, int size) { printf("[ "); for (int i = 0; i < size; i++) { printf("%d ", array[i]); } printf("]\n"); } __global__ void radixSort(unsigned int* valuesList, int digit, int arraySize, int* histogram, int* mainOffset, int* mainOffsetAfter) { // each element is corresponds to a bucket from 0-9 // each element initialized to 0. // __shared__ int histogram[10]; // int OFFSETOriginal[10]; __shared__ int OFFSETChanged[10]; int tid = threadIdx.x + blockIdx.x * blockDim.x; // take element in values at this instanced thread and find the digit // we're looking for thats passed in and increment the corresponding element // in the histogram if (tid < arraySize) atomicAdd(&histogram[valuesList[tid]/digit], 1); __syncthreads(); // find offset values // OFFSETOriginal[0] = histogram[0]; OFFSETChanged[0] = histogram[0]; // mainHistogram[0] = histogram[0]; // for testing purposes. mainOffset[0] = histogram[0]; for (int i = 1; i < 10; i++) { // mainHistogram[i] = histogram[i]; // for testing purposes. // OFFSETOriginal[i] = OFFSETOriginal[i-1] + histogram[i]; OFFSETChanged[i] = OFFSETChanged[i-1] + histogram[i]; mainOffset[i] = OFFSETChanged[i]; } // group numbers together by bucket if (tid < arraySize) { // get the value at this instanced threads id that corresponds to the value at its index in valuesList int value = valuesList[tid]; // find the max index this threads value found from valueList by looking in its offsetbucket int index = OFFSETChanged[valuesList[tid]/digit] - 1; // set every element in valuesList to 0. 
// valuesList[tid] = 0; // OFFSETChanged[valuesList[tid]/digit]--; __syncthreads(); // place the values at their index found above as long as its empty (contains a 0) // if its filled from another thread already placing a value there, // go to the index before it and keep searching down until you find an empty spot // while (valuesList[index] != 0) { // atomicAdd(&OFFSETChanged[valuesList[tid]/digit], -1); // index = OFFSETChanged[valuesList[tid]/digit] - 1; // } int previousValue = value; valuesList[index] = value; atomicAdd(&OFFSETChanged[previousValue/digit], -1); // the list should now be sorted by the 10's digit } __syncthreads(); for (int i = 0; i < 10; i++) { mainOffsetAfter[i] = OFFSETChanged[i]; } return; } __device__ void bucketSort(int* values, int digit) { } int * histogram; int * offset; int * offsetAfter; int main(int argc, char **argv) { totalNumbers = atoi(argv[1]); int histogramSize = 10; valuesList = (unsigned int *)malloc(sizeof(unsigned int)*totalNumbers); histogram = (int*)malloc(sizeof(int)*histogramSize); offset = (int*)malloc(sizeof(int)*histogramSize); offsetAfter = (int*)malloc(sizeof(int)*histogramSize); unsigned int* d_valuesList; int* d_histogram; int* d_offset; int* d_offsetAfter; srand(1); // generate totalNumbers random numbers for valuesList for (int i = 0; i < totalNumbers; i++) { valuesList[i] = (int) rand()%MAX; // valuesList[i] = 26; } printf("VALUES BEFORE:\n"); printArrayU(valuesList, totalNumbers); // fill histogram with 0's for (int i = 0; i < histogramSize; i++) { histogram[i] = 0; offset[i] = 0; offsetAfter[i] = 0; } hipMalloc((void **) &d_valuesList, sizeof(unsigned int)*totalNumbers); hipMemcpy(d_valuesList, valuesList, sizeof(unsigned int)*totalNumbers, hipMemcpyHostToDevice); hipMalloc((void**) &d_histogram, sizeof(int)*histogramSize); hipMemcpy(d_histogram, histogram, sizeof(int)*histogramSize, hipMemcpyHostToDevice); hipMalloc((void**) &d_offset, sizeof(int)*histogramSize); hipMemcpy(d_offset, offset, sizeof(int)*histogramSize, hipMemcpyHostToDevice); hipMalloc((void**) &d_offsetAfter, sizeof(int)*histogramSize); hipMemcpy(d_offsetAfter, offsetAfter, sizeof(int)*histogramSize, hipMemcpyHostToDevice); // digit should be the number we divide valuesList[i] by to find a particular digit. // i.e. if we are looking for the 10's digit we divid by 10. The 100's digit divid // by 100. 326 divide 100 returns 3. This example we limit our number size to only // be 2 digits (max_rand defined at top to be 50) so we pass in 10 as our digit to // find the left most digit, the 10's digit. 
// dim3 dimBlock(totalNumbers,1); dim3 dimGrid(totalNumbers/256 ,1, 1); if (totalNumbers%256) dimGrid.x++; dim3 dimBlock (256, 1, 1); int digit = 10; hipLaunchKernelGGL(( radixSort), dim3((totalNumbers+255)/256), dim3(256), 0, 0, d_valuesList, digit, totalNumbers, d_histogram, d_offset, d_offsetAfter); hipMemcpy(valuesList, d_valuesList, sizeof(unsigned int)*totalNumbers, hipMemcpyDeviceToHost); hipFree(d_valuesList); hipMemcpy(histogram, d_histogram, sizeof(int)*histogramSize, hipMemcpyDeviceToHost); hipFree(d_histogram); hipMemcpy(offset, d_offset, sizeof(int)*histogramSize, hipMemcpyDeviceToHost); hipFree(d_offset); hipMemcpy(offsetAfter, d_offsetAfter, sizeof(int)*histogramSize, hipMemcpyDeviceToHost); hipFree(d_offsetAfter); printf("HISTOGRAM:\n"); printArray(histogram, histogramSize); printf("OFFSET BEFORE:\n"); printArray(offset, histogramSize); printf("OFFSET AFTER:\n"); printArray(offsetAfter, histogramSize); // print valuesList printf("VALUES AFTER:\n"); printArrayU(valuesList, totalNumbers); return 0; }
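The long block comment at the top of this file walks through a single counting pass for the values 9, 31, 4, 18. The following single-threaded reference reproduces exactly those HISTOGRAM, OFFSET, and placement steps; it is an illustrative sketch added here for clarity, not part of the original program, and the function and variable names are hypothetical.

#include <stdio.h>

// One counting pass on the digit selected by `divisor` (10 selects the tens digit),
// mirroring the HISTOGRAM -> OFFSET -> placement walkthrough in the comment above.
static void counting_pass(const unsigned int *in, unsigned int *out, int n, int divisor)
{
    int histogram[10] = {0};
    int offset[10];

    for (int i = 0; i < n; i++)                    // HISTOGRAM: count values per bucket
        histogram[(in[i] / divisor) % 10]++;

    offset[0] = histogram[0];                      // OFFSET: inclusive running total
    for (int b = 1; b < 10; b++)
        offset[b] = offset[b - 1] + histogram[b];

    for (int i = 0; i < n; i++) {                  // placement: insert at offset-1, then decrement
        int b = (in[i] / divisor) % 10;
        out[offset[b] - 1] = in[i];
        offset[b]--;
    }
}

int main(void)
{
    unsigned int vals[4] = {9, 31, 4, 18};
    unsigned int secondList[4];
    counting_pass(vals, secondList, 4, 10);        // bucket by the tens digit
    for (int i = 0; i < 4; i++)
        printf("%u ", secondList[i]);              // prints: 4 9 18 31, the walkthrough's secondList
    printf("\n");
    return 0;
}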
a3653bd8ffa0a5c2be6628105ea13ba7b429dda7.cu
//new /***************** EXAMPLE *********************** ArrayVals: 9, 31, 4, 18 padded ArrayVals: 09, 31, 04, 18 create histogram of size 10 for buckets 0-9 which each element initialized to 0. Use a thread on each element of ArrayVals and increment the value in the bucket it belongs to. This will count how many values that belong in each bucket. In the above example the histogram values would look like this: HISTOGRAM: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 BUCKET -------------------------------------- 2 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 VALUES COUNTER next use an array to count the OFFSET and a copy of that OFFSET array. This is done by taking the element value at each index of the histogram and adding it to the value at the previous index. OFFSET Original: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 -------------------------------------- 2 | 3 | 3 | 4 | 4 | 4 | 4 | 4 | 4 | 4 OFFSET CHANGED IS JUST A COPY OF OFFSET ORIGINAL. OFFSET Changed: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 -------------------------------------- 2 | 3 | 3 | 4 | 4 | 4 | 4 | 4 | 4 | 4 ^ ^ ^ | | | | | taken from 4th index in histogram plus previous (1+3) | | | taken from second index plus the first index (1+2) | taken from the first index in histogram (2) The reason we create a copy is because later, when we want to determine how to rearange the elements, we have to decrement the values in OFFSET so they don't overwrite each other but we must also remember the original OFFSET values. This will become clearer later. As you can see the numbers that repeat occur (like index 2 and 4-9) when its corresponding index in the histogram equals 0 so the value doesn't increase. Now we need to iterate over ArrayVals again and look at the OFFSET changed array index it corresponds with to determine where it goes in the list. We'll create a second temporary list so that we don't ruin the order of the elements in the original ArrayVals. This can be done in parallel so we can use a thread to look at each element of ArrayVals at once. secondList[ArrayValsSize]; we will, for example, look at the first element in ArrayVals. Its left most digit is 0 so we will look at index 0 in the OFFSET changed array. We notice it has a value 2 so we can place this number at the 2nd index of the secondList array we just created. This would be index 1 because arrays start at 0. So whatever number fills the OFFSET changed index we subtract 1 to determine the position to insert into the secondList. After we input into the secondList we want to decrement the value in OFFSET changed so that the next number that checks can be placed in an empty spot and not overwrite the numbers in the same bucket. This means index 0 of the OFFSET changed array goes from 2 to 1. We do the same thing for the other three elements in ArrayVals. 31's first digit is a 3 so look at index 3 in OFFSET changed and we see that it gets placed at 4-1=3 index in the secondList. Remember to decrement the value at OFFSET changed[3] which = 4 so it becomes 3. continue this with the next value which is 04 which means we look at OFFSET changed[0], because its left most digit is 0, which has a value of 1 because the value 2 was decremented when 09 was placed in secondList above in line 75-78. Because the value is now 1 that means we insert 04 into index 1-1=0 of secondList. We finish with value 18. OFFSET changed[1] (because its left most bit is 1) has a value of 3 so we put 18 into secondList[2] because 3-1 = 2. 
After every element has been properly inserted into secondList, it should now look like this: secondList: 04, 09, 18, 31 We can see that its sorted but the computer doensn't know that. In order to be sure its sorted we iterate through the histogram and check to see if each value is at most 1. So if any value in histogram is greater than 1 then we can't be sure its sorted because we don't know which threads finished first. So next if we find a value in histogram that is greater than 1 we look to that index but in the original OFFSET. So histogram[0] has a value of 2 which means we look in the original OFFSET[0] to get the value 2. This means we are working from the ranges of 0-2 in the secondList. so we create histogram and OFFSET again. To do this we just use a recursion and basically repeate the process above but now only working with elements 0 and 1 based on the range provided. We want to do the same process as above but on the next digit to the right. so we sort 04 and 09 by counting them into the histogram and finding the OFFSET just like above in lines 15-30. They will each look like this: HISTOGRAM: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 -------------------------------------- 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 OFFSET: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 -------------------------------------- 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 2 We iterate over histogram and see if any values are greater than 1. There are none so they must all be sorted! so we iterate over histogram and when we get to a value that is non 0 we can point to secondList and overwrite those numbers with the current numbers and they will be in the correct order. histogram[4] is the first element with a non 0 value. We were given ranges 0-2 from above (see lines 103-106) so we start at 0 and point to secondList[0] and insert 4. Then we continue our iteration over histogram and get to 9 as the next non 0 element. We can point to secondList[1] to insert 9. We are done with this part so it will return to the previous step which is line 102 where it will continuing iterating over its histogram looking for values greater than 1. Refer to the histogram displayed on line 23 as displayed here: HISTOGRAM: 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 BUCKET -------------------------------------- 2 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 VALUES COUNTER We branched off initially from histogram[0] because it had a value greater than 1 but now we are back and can continue. The rest of the elemnts contain either a 0 or 1 so don't need to be sorted anymore. This means secondList contains the sorted array. All that is left is to use threads for each element of secondList and copy their value into the original array ArrayVals because ArrayVals is the one that was sent from the CPU that needs to go back to the CPU. The array is sorted and we are done! 
**************************************************/ //new #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> // #define MAX 2147483647; #define MAX 99; unsigned int * valuesList; unsigned int totalNumbers; void printArray(int * array, int size) { printf("[ "); for (int i = 0; i < size; i++) { printf("%d ", array[i]); } printf("]\n"); } void printArrayU(unsigned int * array, int size) { printf("[ "); for (int i = 0; i < size; i++) { printf("%d ", array[i]); } printf("]\n"); } __global__ void radixSort(unsigned int* valuesList, int digit, int arraySize, int* histogram, int* mainOffset, int* mainOffsetAfter) { // each element is corresponds to a bucket from 0-9 // each element initialized to 0. // __shared__ int histogram[10]; // int OFFSETOriginal[10]; __shared__ int OFFSETChanged[10]; int tid = threadIdx.x + blockIdx.x * blockDim.x; // take element in values at this instanced thread and find the digit // we're looking for thats passed in and increment the corresponding element // in the histogram if (tid < arraySize) atomicAdd(&histogram[valuesList[tid]/digit], 1); __syncthreads(); // find offset values // OFFSETOriginal[0] = histogram[0]; OFFSETChanged[0] = histogram[0]; // mainHistogram[0] = histogram[0]; // for testing purposes. mainOffset[0] = histogram[0]; for (int i = 1; i < 10; i++) { // mainHistogram[i] = histogram[i]; // for testing purposes. // OFFSETOriginal[i] = OFFSETOriginal[i-1] + histogram[i]; OFFSETChanged[i] = OFFSETChanged[i-1] + histogram[i]; mainOffset[i] = OFFSETChanged[i]; } // group numbers together by bucket if (tid < arraySize) { // get the value at this instanced threads id that corresponds to the value at its index in valuesList int value = valuesList[tid]; // find the max index this threads value found from valueList by looking in its offsetbucket int index = OFFSETChanged[valuesList[tid]/digit] - 1; // set every element in valuesList to 0. 
// valuesList[tid] = 0; // OFFSETChanged[valuesList[tid]/digit]--; __syncthreads(); // place the values at their index found above as long as its empty (contains a 0) // if its filled from another thread already placing a value there, // go to the index before it and keep searching down until you find an empty spot // while (valuesList[index] != 0) { // atomicAdd(&OFFSETChanged[valuesList[tid]/digit], -1); // index = OFFSETChanged[valuesList[tid]/digit] - 1; // } int previousValue = value; valuesList[index] = value; atomicAdd(&OFFSETChanged[previousValue/digit], -1); // the list should now be sorted by the 10's digit } __syncthreads(); for (int i = 0; i < 10; i++) { mainOffsetAfter[i] = OFFSETChanged[i]; } return; } __device__ void bucketSort(int* values, int digit) { } int * histogram; int * offset; int * offsetAfter; int main(int argc, char **argv) { totalNumbers = atoi(argv[1]); int histogramSize = 10; valuesList = (unsigned int *)malloc(sizeof(unsigned int)*totalNumbers); histogram = (int*)malloc(sizeof(int)*histogramSize); offset = (int*)malloc(sizeof(int)*histogramSize); offsetAfter = (int*)malloc(sizeof(int)*histogramSize); unsigned int* d_valuesList; int* d_histogram; int* d_offset; int* d_offsetAfter; srand(1); // generate totalNumbers random numbers for valuesList for (int i = 0; i < totalNumbers; i++) { valuesList[i] = (int) rand()%MAX; // valuesList[i] = 26; } printf("VALUES BEFORE:\n"); printArrayU(valuesList, totalNumbers); // fill histogram with 0's for (int i = 0; i < histogramSize; i++) { histogram[i] = 0; offset[i] = 0; offsetAfter[i] = 0; } cudaMalloc((void **) &d_valuesList, sizeof(unsigned int)*totalNumbers); cudaMemcpy(d_valuesList, valuesList, sizeof(unsigned int)*totalNumbers, cudaMemcpyHostToDevice); cudaMalloc((void**) &d_histogram, sizeof(int)*histogramSize); cudaMemcpy(d_histogram, histogram, sizeof(int)*histogramSize, cudaMemcpyHostToDevice); cudaMalloc((void**) &d_offset, sizeof(int)*histogramSize); cudaMemcpy(d_offset, offset, sizeof(int)*histogramSize, cudaMemcpyHostToDevice); cudaMalloc((void**) &d_offsetAfter, sizeof(int)*histogramSize); cudaMemcpy(d_offsetAfter, offsetAfter, sizeof(int)*histogramSize, cudaMemcpyHostToDevice); // digit should be the number we divide valuesList[i] by to find a particular digit. // i.e. if we are looking for the 10's digit we divid by 10. The 100's digit divid // by 100. 326 divide 100 returns 3. This example we limit our number size to only // be 2 digits (max_rand defined at top to be 50) so we pass in 10 as our digit to // find the left most digit, the 10's digit. 
// dim3 dimBlock(totalNumbers,1); dim3 dimGrid(totalNumbers/256 ,1, 1); if (totalNumbers%256) dimGrid.x++; dim3 dimBlock (256, 1, 1); int digit = 10; radixSort<<<(totalNumbers+255)/256, 256>>>(d_valuesList, digit, totalNumbers, d_histogram, d_offset, d_offsetAfter); cudaMemcpy(valuesList, d_valuesList, sizeof(unsigned int)*totalNumbers, cudaMemcpyDeviceToHost); cudaFree(d_valuesList); cudaMemcpy(histogram, d_histogram, sizeof(int)*histogramSize, cudaMemcpyDeviceToHost); cudaFree(d_histogram); cudaMemcpy(offset, d_offset, sizeof(int)*histogramSize, cudaMemcpyDeviceToHost); cudaFree(d_offset); cudaMemcpy(offsetAfter, d_offsetAfter, sizeof(int)*histogramSize, cudaMemcpyDeviceToHost); cudaFree(d_offsetAfter); printf("HISTOGRAM:\n"); printArray(histogram, histogramSize); printf("OFFSET BEFORE:\n"); printArray(offset, histogramSize); printf("OFFSET AFTER:\n"); printArray(offsetAfter, histogramSize); // print valuesList printf("VALUES AFTER:\n"); printArrayU(valuesList, totalNumbers); return 0; }
77dd901ae4cd820f1438dd3740391616fc259962.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <scitbx/array_family/boost_python/flex_fwd.h> #include <cudatbx/cuda_base.cuh> #include <simtbx/gpu/detector.h> #include <simtbx/gpu/detector.cuh> #include <scitbx/vec3.h> #include <scitbx/vec2.h> #define THREADS_PER_BLOCK_X 128 #define THREADS_PER_BLOCK_Y 1 #define THREADS_PER_BLOCK_TOTAL (THREADS_PER_BLOCK_X * THREADS_PER_BLOCK_Y) namespace simtbx { namespace gpu { //refactor later into helper file static hipError_t detMemcpyVectorDoubleToDevice(CUDAREAL *dst, const double *src, size_t vector_items) { CUDAREAL * temp = new CUDAREAL[vector_items]; for (size_t i = 0; i < vector_items; i++) { temp[i] = src[i]; } hipError_t ret = hipMemcpy(dst, temp, sizeof(*dst) * vector_items, hipMemcpyHostToDevice); delete temp; return ret; } packed_metrology::packed_metrology(dxtbx::model::Detector const & arg_detector, dxtbx::model::Beam const & arg_beam) { for (std::size_t panel_id = 0; panel_id < arg_detector.size(); panel_id++){ // helper code arising from the nanoBragg constructor, with user_beam=True typedef scitbx::vec3<double> vec3; /* DETECTOR properties */ /* typically: 1 0 0 */ vec3 fdet_vector = arg_detector[panel_id].get_fast_axis(); fdet_vector = fdet_vector.normalize(); /* typically: 0 -1 0 */ vec3 sdet_vector = arg_detector[panel_id].get_slow_axis(); sdet_vector = sdet_vector.normalize(); /* set orthogonal vector to the detector pixel array */ vec3 odet_vector = fdet_vector.cross(sdet_vector); odet_vector = odet_vector.normalize(); /* dxtbx origin is location of outer corner of the first pixel */ vec3 pix0_vector = arg_detector[panel_id].get_origin()/1000.0; /* what is the point of closest approach between sample and detector? */ double close_distance = pix0_vector * odet_vector; if (close_distance < 0){ bool verbose = false; if(verbose)printf("WARNING: dxtbx model is lefthanded. Inverting odet_vector.\n"); odet_vector = -1. 
* odet_vector; close_distance = -1*close_distance; } sdet.push_back(sdet_vector.length()); fdet.push_back(fdet_vector.length()); odet.push_back(odet_vector.length()); pix0.push_back(0.); for (std::size_t idx_vec = 0; idx_vec < 3; idx_vec++){ sdet.push_back(sdet_vector[idx_vec]); fdet.push_back(fdet_vector[idx_vec]); odet.push_back(odet_vector[idx_vec]); pix0.push_back(pix0_vector[idx_vec]); } /* set beam centre */ scitbx::vec2<double> dials_bc=arg_detector[panel_id].get_beam_centre(arg_beam.get_s0()); dists.push_back(close_distance); Xbeam.push_back(dials_bc[0]/1000.0); Ybeam.push_back(dials_bc[1]/1000.0); } }; packed_metrology::packed_metrology(const simtbx::nanoBragg::nanoBragg& nB){ for (std::size_t idx_vec = 0; idx_vec < 4; idx_vec++){ sdet.push_back(nB.sdet_vector[idx_vec]); fdet.push_back(nB.fdet_vector[idx_vec]); odet.push_back(nB.odet_vector[idx_vec]); pix0.push_back(nB.pix0_vector[idx_vec]); } dists.push_back(nB.close_distance); Xbeam.push_back(nB.Xbeam); Ybeam.push_back(nB.Ybeam); } void packed_metrology::show() const { for (std::size_t idx_p = 0; idx_p < Xbeam.size(); idx_p++){ printf(" Panel %3d\n",idx_p); printf(" Panel %3d sdet %9.6f %9.6f %9.6f %9.6f fdet %9.6f %9.6f %9.6f %9.6f\n", idx_p,sdet[4*idx_p+0],sdet[4*idx_p+1],sdet[4*idx_p+2],sdet[4*idx_p+3], fdet[4*idx_p+0],fdet[4*idx_p+1],fdet[4*idx_p+2],fdet[4*idx_p+3] ); printf(" Panel %3d odet %9.6f %9.6f %9.6f %9.6f pix0 %9.6f %9.6f %9.6f %9.6f\n", idx_p,odet[4*idx_p+0],odet[4*idx_p+1],odet[4*idx_p+2],odet[4*idx_p+3], pix0[4*idx_p+0],pix0[4*idx_p+1],pix0[4*idx_p+2],pix0[4*idx_p+3] ); printf(" Panel %3d beam %11.8f %11.8f\n",idx_p,Xbeam[idx_p],Ybeam[idx_p]); } } void gpu_detector::construct_detail(int const& arg_device_id, dxtbx::model::Detector const & arg_detector){ hipSetDevice(arg_device_id); //1) determine the size cu_n_panels = detector.size(); SCITBX_ASSERT( cu_n_panels >= 1); //2) confirm that array dimensions are similar for each size cu_slow_pixels = detector[0].get_image_size()[0]; cu_fast_pixels = detector[0].get_image_size()[1]; for (int ipanel=1; ipanel < detector.size(); ++ipanel){ SCITBX_ASSERT(detector[ipanel].get_image_size()[0] == cu_slow_pixels); SCITBX_ASSERT(detector[ipanel].get_image_size()[1] == cu_fast_pixels); } _image_size = cu_n_panels * cu_slow_pixels * cu_fast_pixels; //3) allocate a cuda array with these dimensions /* separate accumulator image outside the usual nanoBragg data structure. 1. accumulate contributions from a sequence of source energy channels computed separately 2. 
represent multiple panels, all same rectangular shape; slowest dimension = n_panels */ cudaSafeCall(hipMalloc((void ** )&cu_accumulate_floatimage, sizeof(*cu_accumulate_floatimage) * _image_size)); cudaSafeCall(hipMemset((void *)cu_accumulate_floatimage, 0, sizeof(*cu_accumulate_floatimage) * _image_size)); }; gpu_detector::gpu_detector(int const& arg_device_id, dxtbx::model::Detector const & arg_detector, dxtbx::model::Beam const& arg_beam): h_deviceID(arg_device_id), detector(arg_detector), cu_active_pixel_list(NULL), cu_accumulate_floatimage(NULL), metrology(arg_detector, arg_beam){ construct_detail(arg_device_id, arg_detector); } gpu_detector::gpu_detector(int const& arg_device_id, const simtbx::nanoBragg::nanoBragg& nB): h_deviceID(arg_device_id), metrology(nB), cu_active_pixel_list(NULL), cu_accumulate_floatimage(NULL){ hipSetDevice(arg_device_id); //1) determine the size cu_n_panels = 1; //2) confirm that array dimensions are similar for each size cu_slow_pixels = nB.spixels; cu_fast_pixels = nB.fpixels; _image_size = cu_n_panels * cu_slow_pixels * cu_fast_pixels; //3) allocate a cuda array with these dimensions /* separate accumulator image outside the usual nanoBragg data structure. 1. accumulate contributions from a sequence of source energy channels computed separately 2. represent multiple panels, all same rectangular shape; slowest dimension = n_panels */ cudaSafeCall(hipMalloc((void ** )&cu_accumulate_floatimage, sizeof(*cu_accumulate_floatimage) * _image_size)); cudaSafeCall(hipMemset((void *)cu_accumulate_floatimage, 0, sizeof(*cu_accumulate_floatimage) * _image_size)); } void gpu_detector::free_detail(){ hipSetDevice(h_deviceID); //4) make sure we can deallocate cuda array later on if (cu_accumulate_floatimage != NULL) { cudaSafeCall(hipFree(cu_accumulate_floatimage)); } }; void gpu_detector::scale_in_place_cuda(const double& factor){ cudaSafeCall(hipSetDevice(h_deviceID)); hipDeviceProp_t deviceProps = { 0 }; cudaSafeCall(hipGetDeviceProperties(&deviceProps, h_deviceID)); int smCount = deviceProps.multiProcessorCount; dim3 threadsPerBlock(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y); dim3 numBlocks(smCount * 8, 1); int total_pixels = _image_size; hipLaunchKernelGGL(( scale_array_CUDAKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, factor, cu_accumulate_floatimage, total_pixels); } void gpu_detector::write_raw_pixels_cuda(simtbx::nanoBragg::nanoBragg& nB){ //only implement the monolithic detector case, one panel SCITBX_ASSERT(nB.spixels == cu_slow_pixels); SCITBX_ASSERT(nB.fpixels == cu_fast_pixels); SCITBX_ASSERT(cu_n_panels == 1); /* nB.raw_pixels = af::flex_double(af::flex_grid<>(nB.spixels,nB.fpixels)); do not reallocate CPU memory for the data write, as it is not needed */ double * double_floatimage = nB.raw_pixels.begin(); cudaSafeCall(hipSetDevice(nB.device_Id)); cudaSafeCall(hipMemcpy( double_floatimage, cu_accumulate_floatimage, sizeof(*cu_accumulate_floatimage) * _image_size, hipMemcpyDeviceToHost)); } af::flex_double gpu_detector::get_raw_pixels_cuda(){ //return the data array for the multipanel detector case af::flex_double z(af::flex_grid<>(cu_n_panels,cu_slow_pixels,cu_fast_pixels), af::init_functor_null<double>()); double* begin = z.begin(); cudaSafeCall(hipSetDevice(h_deviceID)); cudaSafeCall(hipMemcpy( begin, cu_accumulate_floatimage, sizeof(*cu_accumulate_floatimage) * _image_size, hipMemcpyDeviceToHost)); return z; } void gpu_detector::set_active_pixels_on_GPU(af::shared<int> active_pixel_list_value){ active_pixel_list = active_pixel_list_value; 
cudaSafeCall(hipSetDevice(h_deviceID)); int * ptr_active_pixel_list = active_pixel_list.begin(); cudaSafeCall(hipMalloc((void ** )&cu_active_pixel_list, sizeof(*cu_active_pixel_list) * active_pixel_list.size() )); cudaSafeCall(hipMemcpy(cu_active_pixel_list, ptr_active_pixel_list, sizeof(*cu_active_pixel_list) * active_pixel_list.size(), hipMemcpyHostToDevice)); } af::shared<double> gpu_detector::get_whitelist_raw_pixels_cuda(af::shared<std::size_t> selection ){ //return the data array for the multipanel detector case, but only for whitelist pixels af::shared<double> z(active_pixel_list.size(), af::init_functor_null<double>()); double* begin = z.begin(); cudaSafeCall(hipSetDevice(h_deviceID)); CUDAREAL * cu_active_pixel_results; std::size_t * cu_active_pixel_selection; cudaSafeCall(hipMalloc((void ** )&cu_active_pixel_results, sizeof(*cu_active_pixel_results) * active_pixel_list.size() )); cudaSafeCall(hipMalloc((void ** )&cu_active_pixel_selection, sizeof(*cu_active_pixel_selection) * selection.size() )); cudaSafeCall(hipMemcpy(cu_active_pixel_selection, selection.begin(), sizeof(*cu_active_pixel_selection) * selection.size(), hipMemcpyHostToDevice)); hipDeviceProp_t deviceProps = { 0 }; cudaSafeCall(hipGetDeviceProperties(&deviceProps, h_deviceID)); int smCount = deviceProps.multiProcessorCount; dim3 threadsPerBlock(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y); dim3 numBlocks(smCount * 8, 1); int total_pixels = active_pixel_list.size(); hipLaunchKernelGGL(( get_active_pixel_selection_CUDAKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, cu_active_pixel_results, cu_active_pixel_selection, cu_accumulate_floatimage, total_pixels); cudaSafeCall(hipMemcpy( begin, cu_active_pixel_results, sizeof(*cu_active_pixel_results) * active_pixel_list.size(), hipMemcpyDeviceToHost)); cudaSafeCall(hipFree(cu_active_pixel_selection)); cudaSafeCall(hipFree(cu_active_pixel_results)); return z; } void gpu_detector::each_image_allocate_cuda(){ hipSetDevice(h_deviceID); /*allocate but do not initialize (set to 0) the reductions (the code was too inefficient and was removed as the reductions are not utilized in practice. 
Should they be needed in the future a faster zeroing API must be found*/ cu_omega_reduction = NULL; cudaSafeCall(hipMalloc((void ** )&cu_omega_reduction, sizeof(*cu_omega_reduction) * _image_size)); cu_max_I_x_reduction = NULL; cudaSafeCall(hipMalloc((void ** )&cu_max_I_x_reduction, sizeof(*cu_max_I_x_reduction) * _image_size)); cu_max_I_y_reduction = NULL; cudaSafeCall(hipMalloc((void ** )&cu_max_I_y_reduction, sizeof(*cu_max_I_y_reduction) * _image_size)); cu_rangemap = NULL; cudaSafeCall(hipMalloc((void ** )&cu_rangemap, sizeof(*cu_rangemap) * _image_size)); cu_maskimage = NULL; int unsigned short * maskimage = NULL; //default case, must implement non-trivial initializer elsewhere if (maskimage != NULL) { cudaSafeCall(hipMalloc((void ** )&cu_maskimage, sizeof(*cu_maskimage) * _image_size)); cudaSafeCall(hipMemcpy(cu_maskimage, maskimage, sizeof(*cu_maskimage) * _image_size, hipMemcpyHostToDevice)); } // In contrast to old API, new API initializes its own accumulator, does not take values from CPU cu_floatimage = NULL; cudaSafeCall(hipMalloc((void ** )&cu_floatimage, sizeof(*cu_floatimage) * _image_size)); const int met_length = metrology.sdet.size(); cudaSafeCall(hipMalloc((void ** )&cu_sdet_vector, sizeof(*cu_sdet_vector) * met_length)); cudaSafeCall(detMemcpyVectorDoubleToDevice(cu_sdet_vector, metrology.sdet.begin(), met_length)); cudaSafeCall(hipMalloc((void ** )&cu_fdet_vector, sizeof(*cu_fdet_vector) * met_length)); cudaSafeCall(detMemcpyVectorDoubleToDevice(cu_fdet_vector, metrology.fdet.begin(), met_length)); cudaSafeCall(hipMalloc((void ** )&cu_odet_vector, sizeof(*cu_odet_vector) * met_length)); cudaSafeCall(detMemcpyVectorDoubleToDevice(cu_odet_vector, metrology.odet.begin(), met_length)); cudaSafeCall(hipMalloc((void ** )&cu_pix0_vector, sizeof(*cu_pix0_vector) * met_length)); cudaSafeCall(detMemcpyVectorDoubleToDevice(cu_pix0_vector, metrology.pix0.begin(), met_length)); cudaSafeCall(hipMalloc((void ** )&cu_distance, sizeof(*cu_distance) * metrology.dists.size())); cudaSafeCall(detMemcpyVectorDoubleToDevice(cu_distance, metrology.dists.begin(), metrology.dists.size())); cudaSafeCall(hipMalloc((void ** )&cu_Xbeam, sizeof(*cu_Xbeam) * metrology.Xbeam.size())); cudaSafeCall(detMemcpyVectorDoubleToDevice(cu_Xbeam, metrology.Xbeam.begin(), metrology.Xbeam.size())); cudaSafeCall(hipMalloc((void ** )&cu_Ybeam, sizeof(*cu_Ybeam) * metrology.Ybeam.size())); cudaSafeCall(detMemcpyVectorDoubleToDevice(cu_Ybeam, metrology.Ybeam.begin(), metrology.Ybeam.size())); } void gpu_detector::each_image_free_cuda(){ hipSetDevice(h_deviceID); cudaSafeCall(hipDeviceSynchronize()); cudaSafeCall(hipFree(cu_omega_reduction)); cudaSafeCall(hipFree(cu_max_I_x_reduction)); cudaSafeCall(hipFree(cu_max_I_y_reduction)); cudaSafeCall(hipFree(cu_rangemap)); cudaSafeCall(hipFree(cu_maskimage)); cudaSafeCall(hipFree(cu_floatimage)); cudaSafeCall(hipFree(cu_sdet_vector)); cudaSafeCall(hipFree(cu_fdet_vector)); cudaSafeCall(hipFree(cu_odet_vector)); cudaSafeCall(hipFree(cu_pix0_vector)); cudaSafeCall(hipFree(cu_distance)); cudaSafeCall(hipFree(cu_Xbeam)); cudaSafeCall(hipFree(cu_Ybeam)); cudaSafeCall(hipFree(cu_active_pixel_list)); } } // gpu } // simtbx
77dd901ae4cd820f1438dd3740391616fc259962.cu
#include <scitbx/array_family/boost_python/flex_fwd.h> #include <cudatbx/cuda_base.cuh> #include <simtbx/gpu/detector.h> #include <simtbx/gpu/detector.cuh> #include <scitbx/vec3.h> #include <scitbx/vec2.h> #define THREADS_PER_BLOCK_X 128 #define THREADS_PER_BLOCK_Y 1 #define THREADS_PER_BLOCK_TOTAL (THREADS_PER_BLOCK_X * THREADS_PER_BLOCK_Y) namespace simtbx { namespace gpu { //refactor later into helper file static cudaError_t detMemcpyVectorDoubleToDevice(CUDAREAL *dst, const double *src, size_t vector_items) { CUDAREAL * temp = new CUDAREAL[vector_items]; for (size_t i = 0; i < vector_items; i++) { temp[i] = src[i]; } cudaError_t ret = cudaMemcpy(dst, temp, sizeof(*dst) * vector_items, cudaMemcpyHostToDevice); delete temp; return ret; } packed_metrology::packed_metrology(dxtbx::model::Detector const & arg_detector, dxtbx::model::Beam const & arg_beam) { for (std::size_t panel_id = 0; panel_id < arg_detector.size(); panel_id++){ // helper code arising from the nanoBragg constructor, with user_beam=True typedef scitbx::vec3<double> vec3; /* DETECTOR properties */ /* typically: 1 0 0 */ vec3 fdet_vector = arg_detector[panel_id].get_fast_axis(); fdet_vector = fdet_vector.normalize(); /* typically: 0 -1 0 */ vec3 sdet_vector = arg_detector[panel_id].get_slow_axis(); sdet_vector = sdet_vector.normalize(); /* set orthogonal vector to the detector pixel array */ vec3 odet_vector = fdet_vector.cross(sdet_vector); odet_vector = odet_vector.normalize(); /* dxtbx origin is location of outer corner of the first pixel */ vec3 pix0_vector = arg_detector[panel_id].get_origin()/1000.0; /* what is the point of closest approach between sample and detector? */ double close_distance = pix0_vector * odet_vector; if (close_distance < 0){ bool verbose = false; if(verbose)printf("WARNING: dxtbx model is lefthanded. Inverting odet_vector.\n"); odet_vector = -1. 
* odet_vector; close_distance = -1*close_distance; } sdet.push_back(sdet_vector.length()); fdet.push_back(fdet_vector.length()); odet.push_back(odet_vector.length()); pix0.push_back(0.); for (std::size_t idx_vec = 0; idx_vec < 3; idx_vec++){ sdet.push_back(sdet_vector[idx_vec]); fdet.push_back(fdet_vector[idx_vec]); odet.push_back(odet_vector[idx_vec]); pix0.push_back(pix0_vector[idx_vec]); } /* set beam centre */ scitbx::vec2<double> dials_bc=arg_detector[panel_id].get_beam_centre(arg_beam.get_s0()); dists.push_back(close_distance); Xbeam.push_back(dials_bc[0]/1000.0); Ybeam.push_back(dials_bc[1]/1000.0); } }; packed_metrology::packed_metrology(const simtbx::nanoBragg::nanoBragg& nB){ for (std::size_t idx_vec = 0; idx_vec < 4; idx_vec++){ sdet.push_back(nB.sdet_vector[idx_vec]); fdet.push_back(nB.fdet_vector[idx_vec]); odet.push_back(nB.odet_vector[idx_vec]); pix0.push_back(nB.pix0_vector[idx_vec]); } dists.push_back(nB.close_distance); Xbeam.push_back(nB.Xbeam); Ybeam.push_back(nB.Ybeam); } void packed_metrology::show() const { for (std::size_t idx_p = 0; idx_p < Xbeam.size(); idx_p++){ printf(" Panel %3d\n",idx_p); printf(" Panel %3d sdet %9.6f %9.6f %9.6f %9.6f fdet %9.6f %9.6f %9.6f %9.6f\n", idx_p,sdet[4*idx_p+0],sdet[4*idx_p+1],sdet[4*idx_p+2],sdet[4*idx_p+3], fdet[4*idx_p+0],fdet[4*idx_p+1],fdet[4*idx_p+2],fdet[4*idx_p+3] ); printf(" Panel %3d odet %9.6f %9.6f %9.6f %9.6f pix0 %9.6f %9.6f %9.6f %9.6f\n", idx_p,odet[4*idx_p+0],odet[4*idx_p+1],odet[4*idx_p+2],odet[4*idx_p+3], pix0[4*idx_p+0],pix0[4*idx_p+1],pix0[4*idx_p+2],pix0[4*idx_p+3] ); printf(" Panel %3d beam %11.8f %11.8f\n",idx_p,Xbeam[idx_p],Ybeam[idx_p]); } } void gpu_detector::construct_detail(int const& arg_device_id, dxtbx::model::Detector const & arg_detector){ cudaSetDevice(arg_device_id); //1) determine the size cu_n_panels = detector.size(); SCITBX_ASSERT( cu_n_panels >= 1); //2) confirm that array dimensions are similar for each size cu_slow_pixels = detector[0].get_image_size()[0]; cu_fast_pixels = detector[0].get_image_size()[1]; for (int ipanel=1; ipanel < detector.size(); ++ipanel){ SCITBX_ASSERT(detector[ipanel].get_image_size()[0] == cu_slow_pixels); SCITBX_ASSERT(detector[ipanel].get_image_size()[1] == cu_fast_pixels); } _image_size = cu_n_panels * cu_slow_pixels * cu_fast_pixels; //3) allocate a cuda array with these dimensions /* separate accumulator image outside the usual nanoBragg data structure. 1. accumulate contributions from a sequence of source energy channels computed separately 2. 
represent multiple panels, all same rectangular shape; slowest dimension = n_panels */ cudaSafeCall(cudaMalloc((void ** )&cu_accumulate_floatimage, sizeof(*cu_accumulate_floatimage) * _image_size)); cudaSafeCall(cudaMemset((void *)cu_accumulate_floatimage, 0, sizeof(*cu_accumulate_floatimage) * _image_size)); }; gpu_detector::gpu_detector(int const& arg_device_id, dxtbx::model::Detector const & arg_detector, dxtbx::model::Beam const& arg_beam): h_deviceID(arg_device_id), detector(arg_detector), cu_active_pixel_list(NULL), cu_accumulate_floatimage(NULL), metrology(arg_detector, arg_beam){ construct_detail(arg_device_id, arg_detector); } gpu_detector::gpu_detector(int const& arg_device_id, const simtbx::nanoBragg::nanoBragg& nB): h_deviceID(arg_device_id), metrology(nB), cu_active_pixel_list(NULL), cu_accumulate_floatimage(NULL){ cudaSetDevice(arg_device_id); //1) determine the size cu_n_panels = 1; //2) confirm that array dimensions are similar for each size cu_slow_pixels = nB.spixels; cu_fast_pixels = nB.fpixels; _image_size = cu_n_panels * cu_slow_pixels * cu_fast_pixels; //3) allocate a cuda array with these dimensions /* separate accumulator image outside the usual nanoBragg data structure. 1. accumulate contributions from a sequence of source energy channels computed separately 2. represent multiple panels, all same rectangular shape; slowest dimension = n_panels */ cudaSafeCall(cudaMalloc((void ** )&cu_accumulate_floatimage, sizeof(*cu_accumulate_floatimage) * _image_size)); cudaSafeCall(cudaMemset((void *)cu_accumulate_floatimage, 0, sizeof(*cu_accumulate_floatimage) * _image_size)); } void gpu_detector::free_detail(){ cudaSetDevice(h_deviceID); //4) make sure we can deallocate cuda array later on if (cu_accumulate_floatimage != NULL) { cudaSafeCall(cudaFree(cu_accumulate_floatimage)); } }; void gpu_detector::scale_in_place_cuda(const double& factor){ cudaSafeCall(cudaSetDevice(h_deviceID)); cudaDeviceProp deviceProps = { 0 }; cudaSafeCall(cudaGetDeviceProperties(&deviceProps, h_deviceID)); int smCount = deviceProps.multiProcessorCount; dim3 threadsPerBlock(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y); dim3 numBlocks(smCount * 8, 1); int total_pixels = _image_size; scale_array_CUDAKernel<<<numBlocks, threadsPerBlock>>>( factor, cu_accumulate_floatimage, total_pixels); } void gpu_detector::write_raw_pixels_cuda(simtbx::nanoBragg::nanoBragg& nB){ //only implement the monolithic detector case, one panel SCITBX_ASSERT(nB.spixels == cu_slow_pixels); SCITBX_ASSERT(nB.fpixels == cu_fast_pixels); SCITBX_ASSERT(cu_n_panels == 1); /* nB.raw_pixels = af::flex_double(af::flex_grid<>(nB.spixels,nB.fpixels)); do not reallocate CPU memory for the data write, as it is not needed */ double * double_floatimage = nB.raw_pixels.begin(); cudaSafeCall(cudaSetDevice(nB.device_Id)); cudaSafeCall(cudaMemcpy( double_floatimage, cu_accumulate_floatimage, sizeof(*cu_accumulate_floatimage) * _image_size, cudaMemcpyDeviceToHost)); } af::flex_double gpu_detector::get_raw_pixels_cuda(){ //return the data array for the multipanel detector case af::flex_double z(af::flex_grid<>(cu_n_panels,cu_slow_pixels,cu_fast_pixels), af::init_functor_null<double>()); double* begin = z.begin(); cudaSafeCall(cudaSetDevice(h_deviceID)); cudaSafeCall(cudaMemcpy( begin, cu_accumulate_floatimage, sizeof(*cu_accumulate_floatimage) * _image_size, cudaMemcpyDeviceToHost)); return z; } void gpu_detector::set_active_pixels_on_GPU(af::shared<int> active_pixel_list_value){ active_pixel_list = active_pixel_list_value; 
cudaSafeCall(cudaSetDevice(h_deviceID)); int * ptr_active_pixel_list = active_pixel_list.begin(); cudaSafeCall(cudaMalloc((void ** )&cu_active_pixel_list, sizeof(*cu_active_pixel_list) * active_pixel_list.size() )); cudaSafeCall(cudaMemcpy(cu_active_pixel_list, ptr_active_pixel_list, sizeof(*cu_active_pixel_list) * active_pixel_list.size(), cudaMemcpyHostToDevice)); } af::shared<double> gpu_detector::get_whitelist_raw_pixels_cuda(af::shared<std::size_t> selection ){ //return the data array for the multipanel detector case, but only for whitelist pixels af::shared<double> z(active_pixel_list.size(), af::init_functor_null<double>()); double* begin = z.begin(); cudaSafeCall(cudaSetDevice(h_deviceID)); CUDAREAL * cu_active_pixel_results; std::size_t * cu_active_pixel_selection; cudaSafeCall(cudaMalloc((void ** )&cu_active_pixel_results, sizeof(*cu_active_pixel_results) * active_pixel_list.size() )); cudaSafeCall(cudaMalloc((void ** )&cu_active_pixel_selection, sizeof(*cu_active_pixel_selection) * selection.size() )); cudaSafeCall(cudaMemcpy(cu_active_pixel_selection, selection.begin(), sizeof(*cu_active_pixel_selection) * selection.size(), cudaMemcpyHostToDevice)); cudaDeviceProp deviceProps = { 0 }; cudaSafeCall(cudaGetDeviceProperties(&deviceProps, h_deviceID)); int smCount = deviceProps.multiProcessorCount; dim3 threadsPerBlock(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y); dim3 numBlocks(smCount * 8, 1); int total_pixels = active_pixel_list.size(); get_active_pixel_selection_CUDAKernel<<<numBlocks, threadsPerBlock>>>( cu_active_pixel_results, cu_active_pixel_selection, cu_accumulate_floatimage, total_pixels); cudaSafeCall(cudaMemcpy( begin, cu_active_pixel_results, sizeof(*cu_active_pixel_results) * active_pixel_list.size(), cudaMemcpyDeviceToHost)); cudaSafeCall(cudaFree(cu_active_pixel_selection)); cudaSafeCall(cudaFree(cu_active_pixel_results)); return z; } void gpu_detector::each_image_allocate_cuda(){ cudaSetDevice(h_deviceID); /*allocate but do not initialize (set to 0) the reductions (the code was too inefficient and was removed as the reductions are not utilized in practice. 
Should they be needed in the future a faster zeroing API must be found*/ cu_omega_reduction = NULL; cudaSafeCall(cudaMalloc((void ** )&cu_omega_reduction, sizeof(*cu_omega_reduction) * _image_size)); cu_max_I_x_reduction = NULL; cudaSafeCall(cudaMalloc((void ** )&cu_max_I_x_reduction, sizeof(*cu_max_I_x_reduction) * _image_size)); cu_max_I_y_reduction = NULL; cudaSafeCall(cudaMalloc((void ** )&cu_max_I_y_reduction, sizeof(*cu_max_I_y_reduction) * _image_size)); cu_rangemap = NULL; cudaSafeCall(cudaMalloc((void ** )&cu_rangemap, sizeof(*cu_rangemap) * _image_size)); cu_maskimage = NULL; int unsigned short * maskimage = NULL; //default case, must implement non-trivial initializer elsewhere if (maskimage != NULL) { cudaSafeCall(cudaMalloc((void ** )&cu_maskimage, sizeof(*cu_maskimage) * _image_size)); cudaSafeCall(cudaMemcpy(cu_maskimage, maskimage, sizeof(*cu_maskimage) * _image_size, cudaMemcpyHostToDevice)); } // In contrast to old API, new API initializes its own accumulator, does not take values from CPU cu_floatimage = NULL; cudaSafeCall(cudaMalloc((void ** )&cu_floatimage, sizeof(*cu_floatimage) * _image_size)); const int met_length = metrology.sdet.size(); cudaSafeCall(cudaMalloc((void ** )&cu_sdet_vector, sizeof(*cu_sdet_vector) * met_length)); cudaSafeCall(detMemcpyVectorDoubleToDevice(cu_sdet_vector, metrology.sdet.begin(), met_length)); cudaSafeCall(cudaMalloc((void ** )&cu_fdet_vector, sizeof(*cu_fdet_vector) * met_length)); cudaSafeCall(detMemcpyVectorDoubleToDevice(cu_fdet_vector, metrology.fdet.begin(), met_length)); cudaSafeCall(cudaMalloc((void ** )&cu_odet_vector, sizeof(*cu_odet_vector) * met_length)); cudaSafeCall(detMemcpyVectorDoubleToDevice(cu_odet_vector, metrology.odet.begin(), met_length)); cudaSafeCall(cudaMalloc((void ** )&cu_pix0_vector, sizeof(*cu_pix0_vector) * met_length)); cudaSafeCall(detMemcpyVectorDoubleToDevice(cu_pix0_vector, metrology.pix0.begin(), met_length)); cudaSafeCall(cudaMalloc((void ** )&cu_distance, sizeof(*cu_distance) * metrology.dists.size())); cudaSafeCall(detMemcpyVectorDoubleToDevice(cu_distance, metrology.dists.begin(), metrology.dists.size())); cudaSafeCall(cudaMalloc((void ** )&cu_Xbeam, sizeof(*cu_Xbeam) * metrology.Xbeam.size())); cudaSafeCall(detMemcpyVectorDoubleToDevice(cu_Xbeam, metrology.Xbeam.begin(), metrology.Xbeam.size())); cudaSafeCall(cudaMalloc((void ** )&cu_Ybeam, sizeof(*cu_Ybeam) * metrology.Ybeam.size())); cudaSafeCall(detMemcpyVectorDoubleToDevice(cu_Ybeam, metrology.Ybeam.begin(), metrology.Ybeam.size())); } void gpu_detector::each_image_free_cuda(){ cudaSetDevice(h_deviceID); cudaSafeCall(cudaDeviceSynchronize()); cudaSafeCall(cudaFree(cu_omega_reduction)); cudaSafeCall(cudaFree(cu_max_I_x_reduction)); cudaSafeCall(cudaFree(cu_max_I_y_reduction)); cudaSafeCall(cudaFree(cu_rangemap)); cudaSafeCall(cudaFree(cu_maskimage)); cudaSafeCall(cudaFree(cu_floatimage)); cudaSafeCall(cudaFree(cu_sdet_vector)); cudaSafeCall(cudaFree(cu_fdet_vector)); cudaSafeCall(cudaFree(cu_odet_vector)); cudaSafeCall(cudaFree(cu_pix0_vector)); cudaSafeCall(cudaFree(cu_distance)); cudaSafeCall(cudaFree(cu_Xbeam)); cudaSafeCall(cudaFree(cu_Ybeam)); cudaSafeCall(cudaFree(cu_active_pixel_list)); } } // gpu } // simtbx
8845151ceb48eb482cda04c8ed26bdc14fc2ed91.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" typedef unsigned char Rgb[3]; typedef float Vec2[2]; typedef float Vec3[3]; #include <stdio.h> #define ww 16 #define hh 16 __device__ __host__ inline float edgeFunction(const Vec2 &a, const Vec2 &b, const Vec2 &c) { return (c[0] - a[0]) * (b[1] - a[1]) - (c[1] - a[1]) * (b[0] - a[0]); } __global__ void rasterize_triangle(unsigned char * framebuffer_d, const float * x0_d, const float * x1_d, const float * x2_d, const float * y0_d, const float * y1_d, const float * y2_d, const int w, const int h, const int num_triangles){ for(int k = 0; k < num_triangles; k++){ int tx = threadIdx.x; int ty = threadIdx.y; int j = ty + blockIdx.y * blockDim.y; // rows int i = tx + blockIdx.x * blockDim.x; // cols Vec2 a = {x0_d[k], y0_d[k]}; Vec2 b = {x1_d[k], y1_d[k]}; Vec2 c = {x2_d[k], y2_d[k]}; float area = edgeFunction(a,b,c); Vec2 p = {i + 0.5f, j + 0.5f}; int index = (i + j * w)*3; float alpha = edgeFunction(b,c,p); float beta = edgeFunction(c,a,p); float gamma = edgeFunction(a,b,p); if(alpha >= 0 && beta >= 0 && gamma >= 0){ alpha = alpha / area; beta = beta / area; gamma = gamma / area; float r = alpha; float g = beta; float bb = gamma; if(i < 512 && j < 512){ framebuffer_d[index] = (unsigned char)(r * 255); framebuffer_d[index + 1] = (unsigned char)(g * 255); framebuffer_d[index + 2] = (unsigned char)(bb * 255); } } } } void basicTriRast(unsigned char * framebuffer_d, const float * x0_d, const float * x1_d, const float * x2_d, const float * y0_d, const float * y1_d, const float * y2_d, const int w, const int h, const int num_triangles){ const unsigned int BLOCK_SIZE = 32; dim3 BlocksPerGrid(ceil(double(512)/BLOCK_SIZE),ceil(double(512)/BLOCK_SIZE),1); dim3 ThreadsPerBlock(BLOCK_SIZE,BLOCK_SIZE,1); hipLaunchKernelGGL(( rasterize_triangle), dim3(BlocksPerGrid), dim3(ThreadsPerBlock), 0, 0, framebuffer_d, x0_d, x1_d, x2_d, y0_d, y1_d, y2_d, w, h, num_triangles); }
8845151ceb48eb482cda04c8ed26bdc14fc2ed91.cu
typedef unsigned char Rgb[3];
typedef float Vec2[2];
typedef float Vec3[3];

#include <stdio.h>

#define ww 16
#define hh 16

__device__ __host__
inline float edgeFunction(const Vec2 &a, const Vec2 &b, const Vec2 &c)
{ return (c[0] - a[0]) * (b[1] - a[1]) - (c[1] - a[1]) * (b[0] - a[0]); }

__global__ void rasterize_triangle(unsigned char * framebuffer_d, const float * x0_d, const float * x1_d, const float * x2_d,
                                   const float * y0_d, const float * y1_d, const float * y2_d,
                                   const int w, const int h, const int num_triangles){
    for(int k = 0; k < num_triangles; k++){
        int tx = threadIdx.x;
        int ty = threadIdx.y;
        int j = ty + blockIdx.y * blockDim.y; // rows
        int i = tx + blockIdx.x * blockDim.x; // cols

        Vec2 a = {x0_d[k], y0_d[k]};
        Vec2 b = {x1_d[k], y1_d[k]};
        Vec2 c = {x2_d[k], y2_d[k]};

        float area = edgeFunction(a,b,c);
        Vec2 p = {i + 0.5f, j + 0.5f};
        int index = (i + j * w)*3;

        float alpha = edgeFunction(b,c,p);
        float beta = edgeFunction(c,a,p);
        float gamma = edgeFunction(a,b,p);

        if(alpha >= 0 && beta >= 0 && gamma >= 0){
            alpha = alpha / area;
            beta = beta / area;
            gamma = gamma / area;
            float r = alpha;
            float g = beta;
            float bb = gamma;
            if(i < 512 && j < 512){
                framebuffer_d[index] = (unsigned char)(r * 255);
                framebuffer_d[index + 1] = (unsigned char)(g * 255);
                framebuffer_d[index + 2] = (unsigned char)(bb * 255);
            }
        }
    }
}

void basicTriRast(unsigned char * framebuffer_d, const float * x0_d, const float * x1_d, const float * x2_d,
                  const float * y0_d, const float * y1_d, const float * y2_d,
                  const int w, const int h, const int num_triangles){
    const unsigned int BLOCK_SIZE = 32;
    dim3 BlocksPerGrid(ceil(double(512)/BLOCK_SIZE),ceil(double(512)/BLOCK_SIZE),1);
    dim3 ThreadsPerBlock(BLOCK_SIZE,BLOCK_SIZE,1);
    rasterize_triangle<<<BlocksPerGrid, ThreadsPerBlock>>>(framebuffer_d, x0_d, x1_d, x2_d, y0_d, y1_d, y2_d, w, h, num_triangles);
}
2103a21c09d657fc6c1d12c19e4a8b3700c0e3f3.hip
// !!! This is a file automatically generated by hipify!!!
// Baseado em: https://docs.nvidia.com/cuda/hiprand/
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <hiprand/hiprand_kernel.h>
#include "rocblas.h"
#include <iostream>
#include <iomanip>

struct estimate_pi : public thrust::unary_function<unsigned int, float>
{
  __device__
  float operator()(unsigned int thread_id)
  {
    hiprandState_t s;
    float sum = 0;
    unsigned int N = 1<<16; // numero de amostras por thread
    //hipblasHandle_t handle;
    //hipblasCreate(&handle);

    hiprand_init(thread_id, 0, 0, &s); // use como seed o identificador da chamada da funcao

    for(unsigned int i = 0; i < N; ++i) { // calcule para um quarto de circulo N vezes
      float x = hiprand_uniform(&s);
      float y = hiprand_uniform(&s);
      float dist = sqrtf(x*x + y*y); // calcule a distancia ate a origem

      if(dist <= 1.0f) // se cair no circulo adicione 1
        sum += 1.0f;
    }
    sum *= 4.0f; // multiplique por 4 para ter a área toda do círculo
    //hipblasDestroy(handle);

    return sum / N; // divida pelo numero de amostras
  }
};

int main(void)
{
  int M = 1<<20;

  float estimate = thrust::transform_reduce(
        thrust::counting_iterator<int>(0),
        thrust::counting_iterator<int>(M),
        estimate_pi(),
        0.0f,
        thrust::plus<float>());
  estimate /= M;

  std::cout << std::setprecision(7);
  std::cout << "pi e' aproximadamente ";
  std::cout << estimate << std::endl;

  return 0;
}
2103a21c09d657fc6c1d12c19e4a8b3700c0e3f3.cu
// Baseado em: https://docs.nvidia.com/cuda/curand/
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <curand_kernel.h>
#include "cublas_v2.h"
#include <iostream>
#include <iomanip>

struct estimate_pi : public thrust::unary_function<unsigned int, float>
{
  __device__
  float operator()(unsigned int thread_id)
  {
    curandState s;
    float sum = 0;
    unsigned int N = 1<<16; // numero de amostras por thread
    //cublasHandle_t handle;
    //cublasCreate(&handle);

    curand_init(thread_id, 0, 0, &s); // use como seed o identificador da chamada da funcao

    for(unsigned int i = 0; i < N; ++i) { // calcule para um quarto de circulo N vezes
      float x = curand_uniform(&s);
      float y = curand_uniform(&s);
      float dist = sqrtf(x*x + y*y); // calcule a distancia ate a origem

      if(dist <= 1.0f) // se cair no circulo adicione 1
        sum += 1.0f;
    }
    sum *= 4.0f; // multiplique por 4 para ter a área toda do círculo
    //cublasDestroy(handle);

    return sum / N; // divida pelo numero de amostras
  }
};

int main(void)
{
  int M = 1<<20;

  float estimate = thrust::transform_reduce(
        thrust::counting_iterator<int>(0),
        thrust::counting_iterator<int>(M),
        estimate_pi(),
        0.0f,
        thrust::plus<float>());
  estimate /= M;

  std::cout << std::setprecision(7);
  std::cout << "pi e' aproximadamente ";
  std::cout << estimate << std::endl;

  return 0;
}
d9507108cb0c7604897a08b870321cde646c6b19.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Babak Poursartip 02/27/2021 CUDA topic: stream. - Instead of using malloc or new to allocation memory on the CPU(host), we use hipHostMalloc(). This will allocate a pinned memory on the host. - To free the memory, we use hipHostFree, instead of delete to deallocate. - The disadvantage is that you cannot swap the memory to the disk & we need to have enough memory to use this memory. */ #include <iostream> #include <cmath> #include <ctime> // ============================== const int chunkCount = 1 << 20; const int totalCount = chunkCount << 3; __global__ void kernel(float *a, float *b, float *c) { int tid = blockDim.x * blockIdx.x+threadIdx.x; if (tid < chunkCount) c[tid] = erff(a[tid]+b[tid]); } // ============================== int main() { printf(" starts \n"); hipDeviceProp_t prop; int device; hipGetDevice(&device); hipGetDeviceProperties(&prop, device); if (!prop.deviceOverlap) { return 0; } hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); hipStream_t stream; hipStreamCreate(&stream); float *ha, *hb, *hc, *da, *db, *dc; const int totalSize = totalCount * sizeof(float); const int chunkSize = chunkCount * sizeof(float); hipMalloc(&da, chunkSize); hipMalloc(&db, chunkSize); hipMalloc(&dc, chunkSize); hipHostMalloc(&ha, totalSize, hipHostMallocDefault); hipHostMalloc(&hb, totalSize, hipHostMallocDefault); hipHostMalloc(&hc, totalSize, hipHostMallocDefault); srand((unsigned)time(0)); // random numbers for the two input vectors for (int i = 0; i < totalCount; ++i) { ha[i] = rand() / RAND_MAX; hb[i] = rand() / RAND_MAX; } hipEventRecord(start, stream); for (int i = 0; i < totalCount; i += chunkCount) { hipMemcpyAsync(da, ha+i, chunkSize, hipMemcpyHostToDevice, stream); hipMemcpyAsync(db, ha+i, chunkSize, hipMemcpyHostToDevice, stream); hipLaunchKernelGGL(( kernel), dim3(chunkCount/64),dim3(64),0,stream, da, db, dc); hipMemcpyAsync(hc+i, dc, chunkSize, hipMemcpyHostToDevice, stream); } hipStreamSynchronize(stream); hipEventRecord(end, stream); hipEventSynchronize(end); float elapsed; hipEventElapsedTime(&elapsed, start, end); std::cout << " it took(ms): " << elapsed << std::endl; hipHostFree(ha); hipHostFree(hb); hipHostFree(hc); hipFree(da); hipFree(db); hipFree(dc); hipStreamDestroy(stream); printf(" done \n"); return 0; }
d9507108cb0c7604897a08b870321cde646c6b19.cu
/*
Babak Poursartip
02/27/2021
CUDA topic: stream.

- Instead of using malloc or new to allocate memory on the CPU (host), we use
  cudaHostAlloc(). This allocates pinned memory on the host.
- To free the memory, we use cudaFreeHost instead of delete.
- The disadvantage is that pinned memory cannot be swapped to disk, so we need
  to have enough physical memory available.
*/

#include <iostream>
#include <cstdio>
#include <cmath>
#include <ctime>

// ==============================
const int chunkCount = 1 << 20;
const int totalCount = chunkCount << 3;

__global__ void kernel(float *a, float *b, float *c) {
  int tid = blockDim.x * blockIdx.x + threadIdx.x;
  if (tid < chunkCount)
    c[tid] = erff(a[tid] + b[tid]);
}

// ==============================
int main() {
  printf(" starts \n");

  cudaDeviceProp prop;
  int device;
  cudaGetDevice(&device);
  cudaGetDeviceProperties(&prop, device);
  if (!prop.deviceOverlap) {
    return 0;
  }

  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);

  cudaStream_t stream;
  cudaStreamCreate(&stream);

  float *ha, *hb, *hc, *da, *db, *dc;
  const int totalSize = totalCount * sizeof(float);
  const int chunkSize = chunkCount * sizeof(float);

  cudaMalloc(&da, chunkSize);
  cudaMalloc(&db, chunkSize);
  cudaMalloc(&dc, chunkSize);

  cudaHostAlloc(&ha, totalSize, cudaHostAllocDefault);
  cudaHostAlloc(&hb, totalSize, cudaHostAllocDefault);
  cudaHostAlloc(&hc, totalSize, cudaHostAllocDefault);

  srand((unsigned)time(0));
  // random numbers for the two input vectors
  // (cast RAND_MAX to float; otherwise the integer division always yields 0)
  for (int i = 0; i < totalCount; ++i) {
    ha[i] = rand() / (float)RAND_MAX;
    hb[i] = rand() / (float)RAND_MAX;
  }

  cudaEventRecord(start, stream);
  for (int i = 0; i < totalCount; i += chunkCount) {
    cudaMemcpyAsync(da, ha + i, chunkSize, cudaMemcpyHostToDevice, stream);
    cudaMemcpyAsync(db, hb + i, chunkSize, cudaMemcpyHostToDevice, stream); // second input comes from hb, not ha
    kernel<<<chunkCount/64, 64, 0, stream>>>(da, db, dc);
    cudaMemcpyAsync(hc + i, dc, chunkSize, cudaMemcpyDeviceToHost, stream); // results are copied device -> host
  }
  cudaStreamSynchronize(stream);

  cudaEventRecord(end, stream);
  cudaEventSynchronize(end);
  float elapsed;
  cudaEventElapsedTime(&elapsed, start, end);
  std::cout << " it took(ms): " << elapsed << std::endl;

  cudaFreeHost(ha);
  cudaFreeHost(hb);
  cudaFreeHost(hc);
  cudaFree(da);
  cudaFree(db);
  cudaFree(dc);
  cudaStreamDestroy(stream);

  printf(" done \n");
  return 0;
}
74ce14ae5a1fab5e8fd2752722250ded384b4714.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "cube_select_four.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int b = 2; int n = XSIZE*YSIZE; float radius = 1; const float *xyz = NULL; hipMalloc(&xyz, XSIZE*YSIZE); int *idx_out = NULL; hipMalloc(&idx_out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( cube_select_four), dim3(gridBlock),dim3(threadBlock), 0, 0, b,n,radius,xyz,idx_out); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( cube_select_four), dim3(gridBlock),dim3(threadBlock), 0, 0, b,n,radius,xyz,idx_out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( cube_select_four), dim3(gridBlock),dim3(threadBlock), 0, 0, b,n,radius,xyz,idx_out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
74ce14ae5a1fab5e8fd2752722250ded384b4714.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "cube_select_four.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int b = 2; int n = XSIZE*YSIZE; float radius = 1; const float *xyz = NULL; cudaMalloc(&xyz, XSIZE*YSIZE); int *idx_out = NULL; cudaMalloc(&idx_out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); cube_select_four<<<gridBlock,threadBlock>>>(b,n,radius,xyz,idx_out); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { cube_select_four<<<gridBlock,threadBlock>>>(b,n,radius,xyz,idx_out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { cube_select_four<<<gridBlock,threadBlock>>>(b,n,radius,xyz,idx_out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
1e9f5a22b836f95124aecdcb95b97e548b4112dd.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/utilities/cudf_test_fixtures.h> #include <tests/utilities/cudf_test_utils.cuh> #include <cudf/rolling.hpp> #include <src/rolling/rolling_detail.hpp> #include <cudf/cudf.h> #include <utilities/error_utils.hpp> #include <utilities/cudf_utils.h> #include <tests/utilities/column_wrapper.cuh> #include <gtest/gtest.h> #include <vector> #include <random> #include <algorithm> #include <memory> template <typename T> class RollingTest : public GdfTest { protected: // integral types template <typename U = T, typename std::enable_if_t<std::is_integral<U>::value, std::nullptr_t> = nullptr> const T random_value(std::mt19937 &rng) { return rng() % std::numeric_limits<T>::max() + 1; } // non-integral types (e.g. floating point) template <typename U = T, typename std::enable_if_t<!std::is_integral<U>::value, std::nullptr_t> = nullptr> const T random_value(std::mt19937 &rng) { return rng() / 10000.0; } // input as column_wrapper void run_test_col(const cudf::test::column_wrapper<T> &input, gdf_size_type w, gdf_size_type m, gdf_size_type f, const std::vector<gdf_size_type> &window, const std::vector<gdf_size_type> &min_periods, const std::vector<gdf_size_type> &forward_window, gdf_agg_op agg) { // it's not possible to check sizes in the rolling window API since we pass raw pointers for window/periods // so we check here that the tests are setup correctly CUDF_EXPECTS(window.size() == 0 || window.size() == (size_t)input.size(), "Window array size != input column size"); CUDF_EXPECTS(min_periods.size() == 0 || min_periods.size() == (size_t)input.size(), "Min periods array size != input column size"); CUDF_EXPECTS(forward_window.size() == 0 || forward_window.size() == (size_t)input.size(), "Forward window array size != input column size"); // copy the input to host std::vector<gdf_valid_type> valid; std::tie(in_col, valid) = input.to_host(); in_col_valid.resize(in_col.size()); for (size_t row = 0; row < in_col.size(); row++) in_col_valid[row] = gdf_is_valid(valid.data(), row); gdf_size_type *d_window = NULL; gdf_size_type *d_min_periods = NULL; gdf_size_type *d_forward_window = NULL; // copy sizes to the gpu if (window.size() > 0) { EXPECT_EQ(RMM_ALLOC(&d_window, window.size() * sizeof(gdf_size_type), 0), RMM_SUCCESS); CUDA_TRY(hipMemcpy(d_window, window.data(), window.size() * sizeof(gdf_size_type), hipMemcpyDefault)); } if (min_periods.size() > 0) { EXPECT_EQ(RMM_ALLOC(&d_min_periods, min_periods.size() * sizeof(gdf_size_type), 0), RMM_SUCCESS); CUDA_TRY(hipMemcpy(d_min_periods, min_periods.data(), min_periods.size() * sizeof(gdf_size_type), hipMemcpyDefault)); } if (forward_window.size() > 0) { EXPECT_EQ(RMM_ALLOC(&d_forward_window, forward_window.size() * sizeof(gdf_size_type), 0), RMM_SUCCESS); CUDA_TRY(hipMemcpy(d_forward_window, forward_window.data(), forward_window.size() * sizeof(gdf_size_type), hipMemcpyDefault)); } out_gdf_col = { 
cudf::rolling_window(*input.get(), w, m, f, agg, d_window, d_min_periods, d_forward_window), deleter }; create_reference_output(agg, w, m, f, window, min_periods, forward_window); compare_gdf_result(); // free GPU memory if (d_window != NULL) EXPECT_EQ(RMM_FREE(d_window, 0), RMM_SUCCESS); if (d_min_periods != NULL) EXPECT_EQ(RMM_FREE(d_min_periods, 0), RMM_SUCCESS); if (d_forward_window != NULL) EXPECT_EQ(RMM_FREE(d_forward_window, 0), RMM_SUCCESS); } // input as data and validity mask void run_test_col(const std::vector<T> &data, const std::vector<bool> &mask, gdf_size_type w, gdf_size_type m, gdf_size_type f, const std::vector<gdf_size_type> &window, const std::vector<gdf_size_type> &min_periods, const std::vector<gdf_size_type> &forward_window, gdf_agg_op agg) { CUDF_EXPECTS(data.size() == mask.size(), "Validity array size != input column size"); cudf::test::column_wrapper<T> input{ (gdf_size_type)data.size(), [&](gdf_index_type row) { return data[row]; }, [&](gdf_index_type row) { return mask[row]; } }; run_test_col(input, w, m, f, window, min_periods, forward_window, agg); } // helper function to test all aggregators template<class... TArgs> void run_test_col_agg(TArgs... FArgs) { // test all supported aggregators run_test_col(FArgs..., GDF_SUM); run_test_col(FArgs..., GDF_MIN); run_test_col(FArgs..., GDF_MAX); run_test_col(FArgs..., GDF_COUNT); run_test_col(FArgs..., GDF_AVG); // this aggregation function is not supported yet - expected to throw an exception EXPECT_THROW(run_test_col(FArgs..., GDF_COUNT_DISTINCT), cudf::logic_error); } private: // use SFINAE to only instantiate for supported combinations template<class agg_op, bool average, typename std::enable_if_t<cudf::detail::is_supported<T, agg_op>(), std::nullptr_t> = nullptr> void create_reference_output(gdf_size_type window, gdf_size_type min_periods, gdf_size_type forward_window, const std::vector<gdf_size_type> &window_col, const std::vector<gdf_size_type> &min_periods_col, const std::vector<gdf_size_type> &forward_window_col) { // compute the reference solution on the cpu gdf_size_type nrows = in_col.size(); ref_data.resize(nrows); ref_data_valid.resize(nrows); agg_op op; for(gdf_size_type i = 0; i < nrows; i++) { T val = agg_op::template identity<T>(); gdf_size_type count = 0; // load sizes if (window_col.size() > 0) window = window_col[i]; if (min_periods_col.size() > 0) min_periods = min_periods_col[i]; min_periods = ::max(min_periods, 1); // at least one observation is required if (forward_window_col.size() > 0) forward_window = forward_window_col[i]; // compute bounds gdf_size_type start_index = ::max((gdf_size_type)0, i - window + 1); gdf_size_type end_index = ::min(nrows, i + forward_window + 1); // exclusive // aggregate for (gdf_size_type j = start_index; j < end_index; j++) { if (in_col_valid.size() == 0 || in_col_valid[j]) { val = op(in_col[j], val); count++; } } ref_data_valid[i] = (count >= min_periods); if (ref_data_valid[i]) { cudf::detail::store_output_functor<T, average>{}(ref_data[i], val, count); } } } template<class agg_op, bool average, typename std::enable_if_t<!cudf::detail::is_supported<T, agg_op>(), std::nullptr_t> = nullptr> void create_reference_output(gdf_size_type window, gdf_size_type min_periods, gdf_size_type forward_window, const std::vector<gdf_size_type> &window_col, const std::vector<gdf_size_type> &min_periods_col, const std::vector<gdf_size_type> &forward_window_col) { CUDF_FAIL("Unsupported combination of type and aggregation"); } void create_reference_output(gdf_agg_op agg, 
gdf_size_type window, gdf_size_type min_periods, gdf_size_type forward_window, const std::vector<gdf_size_type> &window_col, const std::vector<gdf_size_type> &min_periods_col, const std::vector<gdf_size_type> &forward_window_col) { // unroll aggregation types switch(agg) { case GDF_SUM: create_reference_output<cudf::DeviceSum, false>(window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; case GDF_MIN: create_reference_output<cudf::DeviceMin, false>(window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; case GDF_MAX: create_reference_output<cudf::DeviceMax, false>(window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; case GDF_COUNT: create_reference_output<cudf::DeviceCount, false>(window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; case GDF_AVG: create_reference_output<cudf::DeviceSum, true>(window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; default: FAIL() << "aggregation type not supported"; } } void compare_gdf_result() { // convert to column_wrapper to compare // copy output data to host gdf_size_type nrows = in_col.size(); std::vector<T> out_col(nrows); CUDA_TRY(hipMemcpy(out_col.data(), static_cast<T*>(out_gdf_col->data), nrows * sizeof(T), hipMemcpyDefault)); // copy output valid mask to host gdf_size_type nmasks = gdf_valid_allocation_size(nrows); std::vector<gdf_valid_type> out_col_mask(nmasks); CUDA_TRY(hipMemcpy(out_col_mask.data(), static_cast<gdf_valid_type*>(out_gdf_col->valid), nmasks * sizeof(gdf_valid_type), hipMemcpyDefault)); // create column wrappers and compare cudf::test::column_wrapper<T> out(out_col, [&](gdf_index_type i) { return gdf_is_valid(out_col_mask.data(), i); } ); cudf::test::column_wrapper<T> ref(ref_data, [&](gdf_index_type i) { return ref_data_valid[i]; } ); // print the columns for debugging //out.print(); //ref.print(); ASSERT_TRUE(out == ref); } // input std::vector<T> in_col; std::vector<bool> in_col_valid; // reference std::vector<T> ref_data; std::vector<bool> ref_data_valid; // output gdf_col_pointer out_gdf_col; // column deleter const std::function<void(gdf_column*)> deleter = [](gdf_column* col) { col->size = 0; RMM_FREE(col->data, 0); RMM_FREE(col->valid, 0); }; }; // ------------- arithmetic types -------------------- using ArithmeticTypes = ::testing::Types<int8_t, int16_t, int32_t, int64_t, double>; TYPED_TEST_CASE(RollingTest, ArithmeticTypes); TYPED_TEST(RollingTest, EmptyInput) { cudf::test::column_wrapper<TypeParam> input(0); this->run_test_col(input, 2, 2, 2, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), GDF_SUM); } // simple example from Pandas docs TYPED_TEST(RollingTest, SimpleStatic) { // https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.rolling.html const std::vector<TypeParam> col_data = {0, 1, 2, 0, 4}; const std::vector<bool> col_mask = {1, 1, 1, 0, 1}; // static sizes this->run_test_col_agg(col_data, col_mask, 2, 2, 0, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()); } // simple example from Pandas docs: TYPED_TEST(RollingTest, SimpleDynamic) { // https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.rolling.html const std::vector<TypeParam> col_data = {0, 1, 2, 0, 4}; const std::vector<bool> col_mask = {1, 1, 1, 0, 1}; // dynamic sizes this->run_test_col_agg(col_data, col_mask, 0, 0, 0, 
std::vector<gdf_size_type>({ 1, 2, 3, 4, 2 }), std::vector<gdf_size_type>({ 2, 1, 2, 1, 2 }), std::vector<gdf_size_type>({ 1, 0, 1, 0, 1 })); } // this is a special test to check the volatile count variable issue (see rolling.cu for detail) TYPED_TEST(RollingTest, VolatileCount) { const std::vector<TypeParam> col_data = { 8, 70, 45, 20, 59, 80 }; const std::vector<bool> col_mask = { 1, 1, 0, 0, 1, 0 }; // dynamic sizes this->run_test_col_agg(col_data, col_mask, 0, 0, 0, std::vector<gdf_size_type>({ 5, 9, 4, 8, 3, 3 }), std::vector<gdf_size_type>({ 1, 1, 9, 2, 8, 9 }), std::vector<gdf_size_type>({ 6, 3, 3, 0, 2, 1 })); } // all rows are invalid TYPED_TEST(RollingTest, AllInvalid) { gdf_size_type num_rows = 1000; gdf_size_type window = 100; gdf_size_type periods = window; std::vector<TypeParam> col_data(num_rows); std::vector<bool> col_mask(num_rows); std::fill(col_mask.begin(), col_mask.end(), 0); this->run_test_col_agg(col_data, col_mask, window, periods, window, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()); } // window = forward_window = 0 TYPED_TEST(RollingTest, ZeroWindow) { gdf_size_type num_rows = 1000; gdf_size_type window = 0; gdf_size_type periods = num_rows; std::vector<TypeParam> col_data(num_rows); std::vector<bool> col_mask(num_rows); std::fill(col_data.begin(), col_data.end(), 1); std::fill(col_mask.begin(), col_mask.end(), 1); this->run_test_col_agg(col_data, col_mask, window, periods, window, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()); } // min_periods = 0 TYPED_TEST(RollingTest, ZeroPeriods) { gdf_size_type num_rows = 1000; gdf_size_type window = num_rows; gdf_size_type periods = 0; std::vector<TypeParam> col_data(num_rows); std::vector<bool> col_mask(num_rows); std::fill(col_data.begin(), col_data.end(), 1); std::fill(col_mask.begin(), col_mask.end(), 1); this->run_test_col_agg(col_data, col_mask, window, periods, window, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()); } // window in one direction is not large enough to collect enough samples, // but if using both directions we should get == min_periods, // also tests out of boundary accesses TYPED_TEST(RollingTest, BackwardForwardWindow) { gdf_size_type num_rows = 1000; gdf_size_type window = num_rows; gdf_size_type periods = num_rows; std::vector<TypeParam> col_data(num_rows); std::vector<bool> col_mask(num_rows); std::fill(col_data.begin(), col_data.end(), 1); std::fill(col_mask.begin(), col_mask.end(), 1); this->run_test_col_agg(col_data, col_mask, window, periods, window, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()); } // random input data, static parameters, no nulls TYPED_TEST(RollingTest, RandomStaticAllValid) { gdf_size_type num_rows = 10000; gdf_size_type window = 50; gdf_size_type min_periods = 50; // random input std::mt19937 rng(1); cudf::test::column_wrapper<TypeParam> input{ num_rows, [&](gdf_index_type row) { return this->random_value(rng); } }; this->run_test_col_agg(input, window, min_periods, 0, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()); } // random input data, static parameters, with nulls TYPED_TEST(RollingTest, RandomStaticWithInvalid) { gdf_size_type num_rows = 10000; gdf_size_type window = 50; gdf_size_type min_periods = 25; // random input with nulls std::mt19937 rng(1); cudf::test::column_wrapper<TypeParam> input{ num_rows, [&](gdf_index_type row) { return 
this->random_value(rng); }, [&](gdf_index_type row) { return static_cast<bool>(rng() % 2); } }; this->run_test_col_agg(input, window, min_periods, window, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()); } // random input data, dynamic parameters, no nulls TYPED_TEST(RollingTest, RandomDynamicAllValid) { gdf_size_type num_rows = 50000; gdf_size_type max_window_size = 50; // random input std::mt19937 rng(1); cudf::test::column_wrapper<TypeParam> input{ num_rows, [&](gdf_index_type row) { return this->random_value(rng); } }; // random parameters auto generator = [&](){ return rng() % max_window_size; }; std::vector<gdf_size_type> window(num_rows); std::vector<gdf_size_type> min_periods(num_rows); std::vector<gdf_size_type> forward_window(num_rows); std::generate(window.begin(), window.end(), generator); std::generate(min_periods.begin(), min_periods.end(), generator); std::generate(forward_window.begin(), forward_window.end(), generator); this->run_test_col_agg(input, 0, 0, 0, window, min_periods, forward_window); } // random input data, dynamic parameters, with nulls TYPED_TEST(RollingTest, RandomDynamicWithInvalid) { gdf_size_type num_rows = 50000; gdf_size_type max_window_size = 50; // random input with nulls std::mt19937 rng(1); cudf::test::column_wrapper<TypeParam> input{ num_rows, [&](gdf_index_type row) { return this->random_value(rng); }, [&](gdf_index_type row) { return static_cast<bool>(rng() % 2); } }; // random parameters auto generator = [&](){ return rng() % max_window_size; }; std::vector<gdf_size_type> window(num_rows); std::vector<gdf_size_type> min_periods(num_rows); std::vector<gdf_size_type> forward_window(num_rows); std::generate(window.begin(), window.end(), generator); std::generate(min_periods.begin(), min_periods.end(), generator); std::generate(forward_window.begin(), forward_window.end(), generator); this->run_test_col_agg(input, 0, 0, 0, window, min_periods, forward_window); } // mix of static and dynamic parameters TYPED_TEST(RollingTest, RandomDynamicWindowStaticPeriods) { gdf_size_type num_rows = 50000; gdf_size_type max_window_size = 50; gdf_size_type min_periods = 25; // random input with nulls std::mt19937 rng(1); cudf::test::column_wrapper<TypeParam> input{ num_rows, [&](gdf_index_type row) { return this->random_value(rng); }, [&](gdf_index_type row) { return static_cast<bool>(rng() % 2); } }; // random parameters auto generator = [&](){ return rng() % max_window_size; }; std::vector<gdf_size_type> window(num_rows); std::vector<gdf_size_type> forward_window(num_rows); std::generate(window.begin(), window.end(), generator); std::generate(forward_window.begin(), forward_window.end(), generator); this->run_test_col_agg(input, 0, min_periods, 0, window, std::vector<gdf_size_type>(), forward_window); } // ------------- expected failures -------------------- // negative sizes TYPED_TEST(RollingTest, NegativeSizes) { const std::vector<TypeParam> col_data = {0, 1, 2, 0, 4}; const std::vector<bool> col_mask = {1, 1, 1, 0, 1}; EXPECT_THROW(this->run_test_col_agg(col_data, col_mask, -2, 2, 2, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()), cudf::logic_error); EXPECT_THROW(this->run_test_col_agg(col_data, col_mask, 2, -2, 2, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()), cudf::logic_error); EXPECT_THROW(this->run_test_col_agg(col_data, col_mask, 2, 2, -2, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), 
std::vector<gdf_size_type>()), cudf::logic_error); } // validity size mismatch TYPED_TEST(RollingTest, ValidSizeMismatch) { const std::vector<TypeParam> col_data = {0, 1, 2, 0, 4}; const std::vector<bool> col_mask = {1, 1, 1, 0}; // validity mask size mismatch EXPECT_THROW(this->run_test_col_agg(col_data, col_mask, 0, 0, 0, std::vector<gdf_size_type>({ 1, 2, 3, 2, 3 }), std::vector<gdf_size_type>({ 2, 1, 2, 1, 4 }), std::vector<gdf_size_type>({ 1, 0, 1, 0, 4 })), cudf::logic_error); } // window array size mismatch TYPED_TEST(RollingTest, WindowArraySizeMismatch) { const std::vector<TypeParam> col_data = {0, 1, 2, 0, 4}; const std::vector<bool> col_mask = {1, 1, 1, 0, 1}; // this runs ok this->run_test_col_agg(col_data, col_mask, 0, 0, 0, std::vector<gdf_size_type>({ 1, 2, 3, 2, 3 }), std::vector<gdf_size_type>({ 2, 1, 2, 1, 4 }), std::vector<gdf_size_type>({ 1, 0, 1, 0, 4 })); // mismatch for the window array EXPECT_THROW(this->run_test_col_agg(col_data, col_mask, 0, 0, 0, std::vector<gdf_size_type>({ 1, 2, 3, 2 }), std::vector<gdf_size_type>({ 2, 1, 2, 1, 4 }), std::vector<gdf_size_type>({ 1, 0, 1, 0, 4 })), cudf::logic_error); // mismatch for the periods array EXPECT_THROW(this->run_test_col_agg(col_data, col_mask, 0, 0, 0, std::vector<gdf_size_type>({ 1, 2, 3, 4, 3 }), std::vector<gdf_size_type>({ 1, 2, 3, 4 }), std::vector<gdf_size_type>({ 2, 1, 2, 1, 4 })), cudf::logic_error); // mismatch for the forward window array EXPECT_THROW(this->run_test_col_agg(col_data, col_mask, 0, 0, 0, std::vector<gdf_size_type>({ 1, 2, 3, 4, 3 }), std::vector<gdf_size_type>({ 1, 2, 3, 4, 6 }), std::vector<gdf_size_type>({ 2, 1, 2, 1 })), cudf::logic_error); } // ------------- non-arithmetic types -------------------- using NonArithmeticTypes = ::testing::Types<cudf::category, cudf::timestamp, cudf::date32, cudf::date64, cudf::bool8>; template<typename T> using RollingTestNonArithmetic = RollingTest<T>; TYPED_TEST_CASE(RollingTestNonArithmetic, NonArithmeticTypes); // incorrect type/aggregation combo: sum or avg for non-arithmetic types TYPED_TEST(RollingTestNonArithmetic, SumAvgNonArithmetic) { constexpr gdf_size_type size{1000}; cudf::test::column_wrapper<TypeParam> input{ size, [](gdf_index_type row) { return static_cast<TypeParam>(row); }, [](gdf_index_type row) { return row % 2; } }; EXPECT_THROW(this->run_test_col( input, 2, 2, 0, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), GDF_SUM), cudf::logic_error); EXPECT_THROW(this->run_test_col( input, 2, 2, 0, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), GDF_AVG), cudf::logic_error); } // min/max/count should work for non-arithmetic types TYPED_TEST(RollingTestNonArithmetic, MinMaxCountNonArithmetic) { constexpr gdf_size_type size{1000}; cudf::test::column_wrapper<TypeParam> input{ size, [](gdf_index_type row) { return static_cast<TypeParam>(row); }, [](gdf_index_type row) { return row % 2; } }; this->run_test_col(input, 2, 2, 0, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), GDF_MIN); this->run_test_col(input, 2, 2, 0, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), GDF_MAX); this->run_test_col(input, 2, 2, 0, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), GDF_COUNT); } class RollingTestNumba : public GdfTest {}; TEST_F(RollingTestNumba, NumbaGeneric) { const char ptx[] = R"***( // // Generated by NVIDIA NVVM Compiler // // Compiler Build ID: 
CL-24817639 // Cuda compilation tools, release 10.0, V10.0.130 // Based on LLVM 3.4svn // .version 6.3 .target sm_70 .address_size 64 // .globl _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE .common .global .align 8 .u64 _ZN08NumbaEnv8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE; .visible .func (.param .b32 func_retval0) _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE( .param .b64 _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_0, .param .b64 _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_1, .param .b64 _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_2, .param .b64 _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_3, .param .b64 _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_4, .param .b64 _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_5, .param .b64 _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_6, .param .b64 _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_7 ) { .reg .pred %p<3>; .reg .b32 %r<6>; .reg .b64 %rd<18>; ld.param.u64 %rd6, [_ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_0]; ld.param.u64 %rd7, [_ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_5]; ld.param.u64 %rd8, [_ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_6]; ld.param.u64 %rd9, [_ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_7]; mov.u64 %rd15, 0; mov.u64 %rd16, %rd15; BB0_1: mov.u64 %rd2, %rd16; mov.u32 %r5, 0; setp.ge.s64 %p1, %rd15, %rd8; mov.u64 %rd17, %rd15; @%p1 bra BB0_3; mul.lo.s64 %rd12, %rd15, %rd9; add.s64 %rd13, %rd12, %rd7; ld.u32 %r5, [%rd13]; add.s64 %rd17, %rd15, 1; BB0_3: cvt.s64.s32 %rd14, %r5; add.s64 %rd16, %rd14, %rd2; setp.lt.s64 %p2, %rd15, %rd8; mov.u64 %rd15, %rd17; @%p2 bra BB0_1; st.u64 [%rd6], %rd2; mov.u32 %r4, 0; st.param.b32 [func_retval0+0], %r4; ret; } )***"; constexpr gdf_size_type size{12}; cudf::test::column_wrapper<int> input{ size, [](gdf_index_type row) { return static_cast<int>(row); }, [](gdf_index_type row) { return true; } }; gdf_column output; EXPECT_NO_THROW( output = cudf::rolling_window(*input.get(), 2, 4, 2, ptx, GDF_NUMBA_GENERIC_AGG_OPS, GDF_INT64, nullptr, nullptr, nullptr) ); auto output_wrapper = cudf::test::column_wrapper<int64_t>(output); cudf::test::column_wrapper<int64_t> expect{ size, [](gdf_index_type row) { return static_cast<int>(row*4+2); }, [](gdf_index_type row) { return (row != 0 && row != size-2 && row != size-1); } }; EXPECT_TRUE(output_wrapper == expect); gdf_column_free(&output); }
1e9f5a22b836f95124aecdcb95b97e548b4112dd.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <tests/utilities/cudf_test_fixtures.h> #include <tests/utilities/cudf_test_utils.cuh> #include <cudf/rolling.hpp> #include <src/rolling/rolling_detail.hpp> #include <cudf/cudf.h> #include <utilities/error_utils.hpp> #include <utilities/cudf_utils.h> #include <tests/utilities/column_wrapper.cuh> #include <gtest/gtest.h> #include <vector> #include <random> #include <algorithm> #include <memory> template <typename T> class RollingTest : public GdfTest { protected: // integral types template <typename U = T, typename std::enable_if_t<std::is_integral<U>::value, std::nullptr_t> = nullptr> const T random_value(std::mt19937 &rng) { return rng() % std::numeric_limits<T>::max() + 1; } // non-integral types (e.g. floating point) template <typename U = T, typename std::enable_if_t<!std::is_integral<U>::value, std::nullptr_t> = nullptr> const T random_value(std::mt19937 &rng) { return rng() / 10000.0; } // input as column_wrapper void run_test_col(const cudf::test::column_wrapper<T> &input, gdf_size_type w, gdf_size_type m, gdf_size_type f, const std::vector<gdf_size_type> &window, const std::vector<gdf_size_type> &min_periods, const std::vector<gdf_size_type> &forward_window, gdf_agg_op agg) { // it's not possible to check sizes in the rolling window API since we pass raw pointers for window/periods // so we check here that the tests are setup correctly CUDF_EXPECTS(window.size() == 0 || window.size() == (size_t)input.size(), "Window array size != input column size"); CUDF_EXPECTS(min_periods.size() == 0 || min_periods.size() == (size_t)input.size(), "Min periods array size != input column size"); CUDF_EXPECTS(forward_window.size() == 0 || forward_window.size() == (size_t)input.size(), "Forward window array size != input column size"); // copy the input to host std::vector<gdf_valid_type> valid; std::tie(in_col, valid) = input.to_host(); in_col_valid.resize(in_col.size()); for (size_t row = 0; row < in_col.size(); row++) in_col_valid[row] = gdf_is_valid(valid.data(), row); gdf_size_type *d_window = NULL; gdf_size_type *d_min_periods = NULL; gdf_size_type *d_forward_window = NULL; // copy sizes to the gpu if (window.size() > 0) { EXPECT_EQ(RMM_ALLOC(&d_window, window.size() * sizeof(gdf_size_type), 0), RMM_SUCCESS); CUDA_TRY(cudaMemcpy(d_window, window.data(), window.size() * sizeof(gdf_size_type), cudaMemcpyDefault)); } if (min_periods.size() > 0) { EXPECT_EQ(RMM_ALLOC(&d_min_periods, min_periods.size() * sizeof(gdf_size_type), 0), RMM_SUCCESS); CUDA_TRY(cudaMemcpy(d_min_periods, min_periods.data(), min_periods.size() * sizeof(gdf_size_type), cudaMemcpyDefault)); } if (forward_window.size() > 0) { EXPECT_EQ(RMM_ALLOC(&d_forward_window, forward_window.size() * sizeof(gdf_size_type), 0), RMM_SUCCESS); CUDA_TRY(cudaMemcpy(d_forward_window, forward_window.data(), forward_window.size() * sizeof(gdf_size_type), cudaMemcpyDefault)); } out_gdf_col = { cudf::rolling_window(*input.get(), w, m, f, agg, d_window, 
d_min_periods, d_forward_window), deleter }; create_reference_output(agg, w, m, f, window, min_periods, forward_window); compare_gdf_result(); // free GPU memory if (d_window != NULL) EXPECT_EQ(RMM_FREE(d_window, 0), RMM_SUCCESS); if (d_min_periods != NULL) EXPECT_EQ(RMM_FREE(d_min_periods, 0), RMM_SUCCESS); if (d_forward_window != NULL) EXPECT_EQ(RMM_FREE(d_forward_window, 0), RMM_SUCCESS); } // input as data and validity mask void run_test_col(const std::vector<T> &data, const std::vector<bool> &mask, gdf_size_type w, gdf_size_type m, gdf_size_type f, const std::vector<gdf_size_type> &window, const std::vector<gdf_size_type> &min_periods, const std::vector<gdf_size_type> &forward_window, gdf_agg_op agg) { CUDF_EXPECTS(data.size() == mask.size(), "Validity array size != input column size"); cudf::test::column_wrapper<T> input{ (gdf_size_type)data.size(), [&](gdf_index_type row) { return data[row]; }, [&](gdf_index_type row) { return mask[row]; } }; run_test_col(input, w, m, f, window, min_periods, forward_window, agg); } // helper function to test all aggregators template<class... TArgs> void run_test_col_agg(TArgs... FArgs) { // test all supported aggregators run_test_col(FArgs..., GDF_SUM); run_test_col(FArgs..., GDF_MIN); run_test_col(FArgs..., GDF_MAX); run_test_col(FArgs..., GDF_COUNT); run_test_col(FArgs..., GDF_AVG); // this aggregation function is not supported yet - expected to throw an exception EXPECT_THROW(run_test_col(FArgs..., GDF_COUNT_DISTINCT), cudf::logic_error); } private: // use SFINAE to only instantiate for supported combinations template<class agg_op, bool average, typename std::enable_if_t<cudf::detail::is_supported<T, agg_op>(), std::nullptr_t> = nullptr> void create_reference_output(gdf_size_type window, gdf_size_type min_periods, gdf_size_type forward_window, const std::vector<gdf_size_type> &window_col, const std::vector<gdf_size_type> &min_periods_col, const std::vector<gdf_size_type> &forward_window_col) { // compute the reference solution on the cpu gdf_size_type nrows = in_col.size(); ref_data.resize(nrows); ref_data_valid.resize(nrows); agg_op op; for(gdf_size_type i = 0; i < nrows; i++) { T val = agg_op::template identity<T>(); gdf_size_type count = 0; // load sizes if (window_col.size() > 0) window = window_col[i]; if (min_periods_col.size() > 0) min_periods = min_periods_col[i]; min_periods = std::max(min_periods, 1); // at least one observation is required if (forward_window_col.size() > 0) forward_window = forward_window_col[i]; // compute bounds gdf_size_type start_index = std::max((gdf_size_type)0, i - window + 1); gdf_size_type end_index = std::min(nrows, i + forward_window + 1); // exclusive // aggregate for (gdf_size_type j = start_index; j < end_index; j++) { if (in_col_valid.size() == 0 || in_col_valid[j]) { val = op(in_col[j], val); count++; } } ref_data_valid[i] = (count >= min_periods); if (ref_data_valid[i]) { cudf::detail::store_output_functor<T, average>{}(ref_data[i], val, count); } } } template<class agg_op, bool average, typename std::enable_if_t<!cudf::detail::is_supported<T, agg_op>(), std::nullptr_t> = nullptr> void create_reference_output(gdf_size_type window, gdf_size_type min_periods, gdf_size_type forward_window, const std::vector<gdf_size_type> &window_col, const std::vector<gdf_size_type> &min_periods_col, const std::vector<gdf_size_type> &forward_window_col) { CUDF_FAIL("Unsupported combination of type and aggregation"); } void create_reference_output(gdf_agg_op agg, gdf_size_type window, gdf_size_type min_periods, 
gdf_size_type forward_window, const std::vector<gdf_size_type> &window_col, const std::vector<gdf_size_type> &min_periods_col, const std::vector<gdf_size_type> &forward_window_col) { // unroll aggregation types switch(agg) { case GDF_SUM: create_reference_output<cudf::DeviceSum, false>(window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; case GDF_MIN: create_reference_output<cudf::DeviceMin, false>(window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; case GDF_MAX: create_reference_output<cudf::DeviceMax, false>(window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; case GDF_COUNT: create_reference_output<cudf::DeviceCount, false>(window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; case GDF_AVG: create_reference_output<cudf::DeviceSum, true>(window, min_periods, forward_window, window_col, min_periods_col, forward_window_col); break; default: FAIL() << "aggregation type not supported"; } } void compare_gdf_result() { // convert to column_wrapper to compare // copy output data to host gdf_size_type nrows = in_col.size(); std::vector<T> out_col(nrows); CUDA_TRY(cudaMemcpy(out_col.data(), static_cast<T*>(out_gdf_col->data), nrows * sizeof(T), cudaMemcpyDefault)); // copy output valid mask to host gdf_size_type nmasks = gdf_valid_allocation_size(nrows); std::vector<gdf_valid_type> out_col_mask(nmasks); CUDA_TRY(cudaMemcpy(out_col_mask.data(), static_cast<gdf_valid_type*>(out_gdf_col->valid), nmasks * sizeof(gdf_valid_type), cudaMemcpyDefault)); // create column wrappers and compare cudf::test::column_wrapper<T> out(out_col, [&](gdf_index_type i) { return gdf_is_valid(out_col_mask.data(), i); } ); cudf::test::column_wrapper<T> ref(ref_data, [&](gdf_index_type i) { return ref_data_valid[i]; } ); // print the columns for debugging //out.print(); //ref.print(); ASSERT_TRUE(out == ref); } // input std::vector<T> in_col; std::vector<bool> in_col_valid; // reference std::vector<T> ref_data; std::vector<bool> ref_data_valid; // output gdf_col_pointer out_gdf_col; // column deleter const std::function<void(gdf_column*)> deleter = [](gdf_column* col) { col->size = 0; RMM_FREE(col->data, 0); RMM_FREE(col->valid, 0); }; }; // ------------- arithmetic types -------------------- using ArithmeticTypes = ::testing::Types<int8_t, int16_t, int32_t, int64_t, double>; TYPED_TEST_CASE(RollingTest, ArithmeticTypes); TYPED_TEST(RollingTest, EmptyInput) { cudf::test::column_wrapper<TypeParam> input(0); this->run_test_col(input, 2, 2, 2, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), GDF_SUM); } // simple example from Pandas docs TYPED_TEST(RollingTest, SimpleStatic) { // https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.rolling.html const std::vector<TypeParam> col_data = {0, 1, 2, 0, 4}; const std::vector<bool> col_mask = {1, 1, 1, 0, 1}; // static sizes this->run_test_col_agg(col_data, col_mask, 2, 2, 0, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()); } // simple example from Pandas docs: TYPED_TEST(RollingTest, SimpleDynamic) { // https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.rolling.html const std::vector<TypeParam> col_data = {0, 1, 2, 0, 4}; const std::vector<bool> col_mask = {1, 1, 1, 0, 1}; // dynamic sizes this->run_test_col_agg(col_data, col_mask, 0, 0, 0, std::vector<gdf_size_type>({ 1, 2, 3, 4, 2 }), 
std::vector<gdf_size_type>({ 2, 1, 2, 1, 2 }), std::vector<gdf_size_type>({ 1, 0, 1, 0, 1 })); } // this is a special test to check the volatile count variable issue (see rolling.cu for detail) TYPED_TEST(RollingTest, VolatileCount) { const std::vector<TypeParam> col_data = { 8, 70, 45, 20, 59, 80 }; const std::vector<bool> col_mask = { 1, 1, 0, 0, 1, 0 }; // dynamic sizes this->run_test_col_agg(col_data, col_mask, 0, 0, 0, std::vector<gdf_size_type>({ 5, 9, 4, 8, 3, 3 }), std::vector<gdf_size_type>({ 1, 1, 9, 2, 8, 9 }), std::vector<gdf_size_type>({ 6, 3, 3, 0, 2, 1 })); } // all rows are invalid TYPED_TEST(RollingTest, AllInvalid) { gdf_size_type num_rows = 1000; gdf_size_type window = 100; gdf_size_type periods = window; std::vector<TypeParam> col_data(num_rows); std::vector<bool> col_mask(num_rows); std::fill(col_mask.begin(), col_mask.end(), 0); this->run_test_col_agg(col_data, col_mask, window, periods, window, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()); } // window = forward_window = 0 TYPED_TEST(RollingTest, ZeroWindow) { gdf_size_type num_rows = 1000; gdf_size_type window = 0; gdf_size_type periods = num_rows; std::vector<TypeParam> col_data(num_rows); std::vector<bool> col_mask(num_rows); std::fill(col_data.begin(), col_data.end(), 1); std::fill(col_mask.begin(), col_mask.end(), 1); this->run_test_col_agg(col_data, col_mask, window, periods, window, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()); } // min_periods = 0 TYPED_TEST(RollingTest, ZeroPeriods) { gdf_size_type num_rows = 1000; gdf_size_type window = num_rows; gdf_size_type periods = 0; std::vector<TypeParam> col_data(num_rows); std::vector<bool> col_mask(num_rows); std::fill(col_data.begin(), col_data.end(), 1); std::fill(col_mask.begin(), col_mask.end(), 1); this->run_test_col_agg(col_data, col_mask, window, periods, window, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()); } // window in one direction is not large enough to collect enough samples, // but if using both directions we should get == min_periods, // also tests out of boundary accesses TYPED_TEST(RollingTest, BackwardForwardWindow) { gdf_size_type num_rows = 1000; gdf_size_type window = num_rows; gdf_size_type periods = num_rows; std::vector<TypeParam> col_data(num_rows); std::vector<bool> col_mask(num_rows); std::fill(col_data.begin(), col_data.end(), 1); std::fill(col_mask.begin(), col_mask.end(), 1); this->run_test_col_agg(col_data, col_mask, window, periods, window, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()); } // random input data, static parameters, no nulls TYPED_TEST(RollingTest, RandomStaticAllValid) { gdf_size_type num_rows = 10000; gdf_size_type window = 50; gdf_size_type min_periods = 50; // random input std::mt19937 rng(1); cudf::test::column_wrapper<TypeParam> input{ num_rows, [&](gdf_index_type row) { return this->random_value(rng); } }; this->run_test_col_agg(input, window, min_periods, 0, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()); } // random input data, static parameters, with nulls TYPED_TEST(RollingTest, RandomStaticWithInvalid) { gdf_size_type num_rows = 10000; gdf_size_type window = 50; gdf_size_type min_periods = 25; // random input with nulls std::mt19937 rng(1); cudf::test::column_wrapper<TypeParam> input{ num_rows, [&](gdf_index_type row) { return this->random_value(rng); }, [&](gdf_index_type row) { return 
static_cast<bool>(rng() % 2); } }; this->run_test_col_agg(input, window, min_periods, window, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()); } // random input data, dynamic parameters, no nulls TYPED_TEST(RollingTest, RandomDynamicAllValid) { gdf_size_type num_rows = 50000; gdf_size_type max_window_size = 50; // random input std::mt19937 rng(1); cudf::test::column_wrapper<TypeParam> input{ num_rows, [&](gdf_index_type row) { return this->random_value(rng); } }; // random parameters auto generator = [&](){ return rng() % max_window_size; }; std::vector<gdf_size_type> window(num_rows); std::vector<gdf_size_type> min_periods(num_rows); std::vector<gdf_size_type> forward_window(num_rows); std::generate(window.begin(), window.end(), generator); std::generate(min_periods.begin(), min_periods.end(), generator); std::generate(forward_window.begin(), forward_window.end(), generator); this->run_test_col_agg(input, 0, 0, 0, window, min_periods, forward_window); } // random input data, dynamic parameters, with nulls TYPED_TEST(RollingTest, RandomDynamicWithInvalid) { gdf_size_type num_rows = 50000; gdf_size_type max_window_size = 50; // random input with nulls std::mt19937 rng(1); cudf::test::column_wrapper<TypeParam> input{ num_rows, [&](gdf_index_type row) { return this->random_value(rng); }, [&](gdf_index_type row) { return static_cast<bool>(rng() % 2); } }; // random parameters auto generator = [&](){ return rng() % max_window_size; }; std::vector<gdf_size_type> window(num_rows); std::vector<gdf_size_type> min_periods(num_rows); std::vector<gdf_size_type> forward_window(num_rows); std::generate(window.begin(), window.end(), generator); std::generate(min_periods.begin(), min_periods.end(), generator); std::generate(forward_window.begin(), forward_window.end(), generator); this->run_test_col_agg(input, 0, 0, 0, window, min_periods, forward_window); } // mix of static and dynamic parameters TYPED_TEST(RollingTest, RandomDynamicWindowStaticPeriods) { gdf_size_type num_rows = 50000; gdf_size_type max_window_size = 50; gdf_size_type min_periods = 25; // random input with nulls std::mt19937 rng(1); cudf::test::column_wrapper<TypeParam> input{ num_rows, [&](gdf_index_type row) { return this->random_value(rng); }, [&](gdf_index_type row) { return static_cast<bool>(rng() % 2); } }; // random parameters auto generator = [&](){ return rng() % max_window_size; }; std::vector<gdf_size_type> window(num_rows); std::vector<gdf_size_type> forward_window(num_rows); std::generate(window.begin(), window.end(), generator); std::generate(forward_window.begin(), forward_window.end(), generator); this->run_test_col_agg(input, 0, min_periods, 0, window, std::vector<gdf_size_type>(), forward_window); } // ------------- expected failures -------------------- // negative sizes TYPED_TEST(RollingTest, NegativeSizes) { const std::vector<TypeParam> col_data = {0, 1, 2, 0, 4}; const std::vector<bool> col_mask = {1, 1, 1, 0, 1}; EXPECT_THROW(this->run_test_col_agg(col_data, col_mask, -2, 2, 2, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()), cudf::logic_error); EXPECT_THROW(this->run_test_col_agg(col_data, col_mask, 2, -2, 2, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()), cudf::logic_error); EXPECT_THROW(this->run_test_col_agg(col_data, col_mask, 2, 2, -2, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>()), cudf::logic_error); } // validity size mismatch 
TYPED_TEST(RollingTest, ValidSizeMismatch) { const std::vector<TypeParam> col_data = {0, 1, 2, 0, 4}; const std::vector<bool> col_mask = {1, 1, 1, 0}; // validity mask size mismatch EXPECT_THROW(this->run_test_col_agg(col_data, col_mask, 0, 0, 0, std::vector<gdf_size_type>({ 1, 2, 3, 2, 3 }), std::vector<gdf_size_type>({ 2, 1, 2, 1, 4 }), std::vector<gdf_size_type>({ 1, 0, 1, 0, 4 })), cudf::logic_error); } // window array size mismatch TYPED_TEST(RollingTest, WindowArraySizeMismatch) { const std::vector<TypeParam> col_data = {0, 1, 2, 0, 4}; const std::vector<bool> col_mask = {1, 1, 1, 0, 1}; // this runs ok this->run_test_col_agg(col_data, col_mask, 0, 0, 0, std::vector<gdf_size_type>({ 1, 2, 3, 2, 3 }), std::vector<gdf_size_type>({ 2, 1, 2, 1, 4 }), std::vector<gdf_size_type>({ 1, 0, 1, 0, 4 })); // mismatch for the window array EXPECT_THROW(this->run_test_col_agg(col_data, col_mask, 0, 0, 0, std::vector<gdf_size_type>({ 1, 2, 3, 2 }), std::vector<gdf_size_type>({ 2, 1, 2, 1, 4 }), std::vector<gdf_size_type>({ 1, 0, 1, 0, 4 })), cudf::logic_error); // mismatch for the periods array EXPECT_THROW(this->run_test_col_agg(col_data, col_mask, 0, 0, 0, std::vector<gdf_size_type>({ 1, 2, 3, 4, 3 }), std::vector<gdf_size_type>({ 1, 2, 3, 4 }), std::vector<gdf_size_type>({ 2, 1, 2, 1, 4 })), cudf::logic_error); // mismatch for the forward window array EXPECT_THROW(this->run_test_col_agg(col_data, col_mask, 0, 0, 0, std::vector<gdf_size_type>({ 1, 2, 3, 4, 3 }), std::vector<gdf_size_type>({ 1, 2, 3, 4, 6 }), std::vector<gdf_size_type>({ 2, 1, 2, 1 })), cudf::logic_error); } // ------------- non-arithmetic types -------------------- using NonArithmeticTypes = ::testing::Types<cudf::category, cudf::timestamp, cudf::date32, cudf::date64, cudf::bool8>; template<typename T> using RollingTestNonArithmetic = RollingTest<T>; TYPED_TEST_CASE(RollingTestNonArithmetic, NonArithmeticTypes); // incorrect type/aggregation combo: sum or avg for non-arithmetic types TYPED_TEST(RollingTestNonArithmetic, SumAvgNonArithmetic) { constexpr gdf_size_type size{1000}; cudf::test::column_wrapper<TypeParam> input{ size, [](gdf_index_type row) { return static_cast<TypeParam>(row); }, [](gdf_index_type row) { return row % 2; } }; EXPECT_THROW(this->run_test_col( input, 2, 2, 0, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), GDF_SUM), cudf::logic_error); EXPECT_THROW(this->run_test_col( input, 2, 2, 0, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), GDF_AVG), cudf::logic_error); } // min/max/count should work for non-arithmetic types TYPED_TEST(RollingTestNonArithmetic, MinMaxCountNonArithmetic) { constexpr gdf_size_type size{1000}; cudf::test::column_wrapper<TypeParam> input{ size, [](gdf_index_type row) { return static_cast<TypeParam>(row); }, [](gdf_index_type row) { return row % 2; } }; this->run_test_col(input, 2, 2, 0, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), GDF_MIN); this->run_test_col(input, 2, 2, 0, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), GDF_MAX); this->run_test_col(input, 2, 2, 0, std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), std::vector<gdf_size_type>(), GDF_COUNT); } class RollingTestNumba : public GdfTest {}; TEST_F(RollingTestNumba, NumbaGeneric) { const char ptx[] = R"***( // // Generated by NVIDIA NVVM Compiler // // Compiler Build ID: CL-24817639 // Cuda compilation tools, release 10.0, V10.0.130 // Based on LLVM 
3.4svn // .version 6.3 .target sm_70 .address_size 64 // .globl _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE .common .global .align 8 .u64 _ZN08NumbaEnv8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE; .visible .func (.param .b32 func_retval0) _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE( .param .b64 _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_0, .param .b64 _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_1, .param .b64 _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_2, .param .b64 _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_3, .param .b64 _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_4, .param .b64 _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_5, .param .b64 _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_6, .param .b64 _ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_7 ) { .reg .pred %p<3>; .reg .b32 %r<6>; .reg .b64 %rd<18>; ld.param.u64 %rd6, [_ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_0]; ld.param.u64 %rd7, [_ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_5]; ld.param.u64 %rd8, [_ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_6]; ld.param.u64 %rd9, [_ZN8__main__7add$241E5ArrayIiLi1E1A7mutable7alignedE_paam_7]; mov.u64 %rd15, 0; mov.u64 %rd16, %rd15; BB0_1: mov.u64 %rd2, %rd16; mov.u32 %r5, 0; setp.ge.s64 %p1, %rd15, %rd8; mov.u64 %rd17, %rd15; @%p1 bra BB0_3; mul.lo.s64 %rd12, %rd15, %rd9; add.s64 %rd13, %rd12, %rd7; ld.u32 %r5, [%rd13]; add.s64 %rd17, %rd15, 1; BB0_3: cvt.s64.s32 %rd14, %r5; add.s64 %rd16, %rd14, %rd2; setp.lt.s64 %p2, %rd15, %rd8; mov.u64 %rd15, %rd17; @%p2 bra BB0_1; st.u64 [%rd6], %rd2; mov.u32 %r4, 0; st.param.b32 [func_retval0+0], %r4; ret; } )***"; constexpr gdf_size_type size{12}; cudf::test::column_wrapper<int> input{ size, [](gdf_index_type row) { return static_cast<int>(row); }, [](gdf_index_type row) { return true; } }; gdf_column output; EXPECT_NO_THROW( output = cudf::rolling_window(*input.get(), 2, 4, 2, ptx, GDF_NUMBA_GENERIC_AGG_OPS, GDF_INT64, nullptr, nullptr, nullptr) ); auto output_wrapper = cudf::test::column_wrapper<int64_t>(output); cudf::test::column_wrapper<int64_t> expect{ size, [](gdf_index_type row) { return static_cast<int>(row*4+2); }, [](gdf_index_type row) { return (row != 0 && row != size-2 && row != size-1); } }; EXPECT_TRUE(output_wrapper == expect); gdf_column_free(&output); }
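// Note on the expected values above (added for readability; inferred from the test's own
// `expect` column): the generated PTX appears to compute a plain sum of the int32 values in
// each window. With window=2 and forward_window=2 the aggregation spans rows [i-1, i+2],
// i.e. four consecutive row indices, whose sum is (i-1) + i + (i+1) + (i+2) = 4*i + 2 --
// matching the `row*4+2` lambda. min_periods=4 then explains the null mask: rows 0, size-2
// and size-1 have fewer than four in-bounds neighbours, so they are expected to be invalid.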
39fb630a0df5f8850475890968c990e5dcb68e31.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <math.h> #include <sys/time.h> #define MAT_SIZE 512*28 #define BLOCK_SIZE 512 #define MAX_ELEMENT 12 #include "stack.h" #include "../include/cuda_util.h" // Matrix to be represented as single array typedef struct { int * array; int columns; } Matrix; #include "shortestpath_cuda.h" #include "shortestpath.h" int main(int argc, char**argv) { printf("\nSHORTEST PATH: %i x %i\n\n", MAT_SIZE, MAT_SIZE); hipEvent_t start, stop; float elapsedTime, elapsedTime2; // Create a matrix and populate it with random data Matrix mat; mat.array = (int*)malloc(MAT_SIZE * MAT_SIZE * sizeof(int)); mat.columns = MAT_SIZE; srand ( time(NULL) ); for(int i = 0; i < MAT_SIZE * MAT_SIZE; i++) { mat.array[i] = rand() % MAX_ELEMENT; } // ######### CUDA ######### printf("CUDA Implementation: "); hipEventCreate(&start); hipEventCreate(&stop); // Copy matrix to global memory int *DevMat, *dev_shortest_path, *dev_result_stack; cudasafe( hipMalloc((void**)&dev_shortest_path, sizeof(int)), "hipMalloc" ); cudasafe( hipMalloc((void**)&DevMat, MAT_SIZE * MAT_SIZE * sizeof(int)), "hipMalloc" ); if(argc > 1) cudasafe( hipMalloc((void**)&dev_result_stack, MAT_SIZE * 2 * sizeof(int)), "hipMalloc" ); cudasafe( hipMemcpy(DevMat, mat.array, MAT_SIZE * MAT_SIZE * sizeof(int), hipMemcpyHostToDevice), "hipMemcpy" ); // Compute shortest path with gpu int shortestpath = 0; int *result_stack = (int*)malloc(MAT_SIZE * 2 * sizeof(int)); hipEventRecord(start,0); dim3 threadsPerBlock(BLOCK_SIZE); for(int i = 1; i < MAT_SIZE; i++) { dim3 blocks((int)ceil((float)(i+1) / (float)threadsPerBlock.x)); hipLaunchKernelGGL(( shortest_path_cuda), dim3(blocks),dim3(threadsPerBlock), 0, 0, DevMat, dev_shortest_path, dev_result_stack, i); } for(int i = 1; i < MAT_SIZE; i++) { dim3 blocks((int)ceil((float)(MAT_SIZE-i) / (float)threadsPerBlock.x)); hipLaunchKernelGGL(( shortest_path_cuda_2), dim3(blocks),dim3(threadsPerBlock), 0, 0, DevMat, dev_shortest_path, dev_result_stack, i); } if(argc > 1) hipLaunchKernelGGL(( shortest_path_cuda_3), dim3(1),dim3(1), 0, 0, DevMat, dev_shortest_path, dev_result_stack); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cudasafe( hipMemcpy(&shortestpath, dev_shortest_path, sizeof(int), hipMemcpyDeviceToHost) ,"hipMemcpy"); if(argc > 1) cudasafe( hipMemcpy(result_stack, dev_result_stack, MAT_SIZE * 2 * sizeof(int), hipMemcpyDeviceToHost) ,"hipMemcpy"); cudasafe( hipFree(DevMat), "hipFree" ); if(argc > 1) cudasafe( hipFree(dev_result_stack), "hipFree" ); cudasafe( hipFree(dev_shortest_path), "hipFree" ); // Print path taken printf("\nelapsed time: %f\n", elapsedTime); printf("\nShortest Path: %i -> ", shortestpath); if(argc > 1) { int i = -1; while(result_stack[++i] >= 0); for(i--; i >= 0; i--) { printf("%i,", result_stack[i]); } printf("\n"); } // ######### CPU Implementation ######### printf("\n\nCPU Implementation: "); // Create a result stack Stack result; stack_init(&result, MAT_SIZE*2); // Compute shortest path with cpu hipEventRecord(start,0); shortestpath = shortest_path_cpu(&mat, &result); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime2, start, stop); // Print path taken printf("\nelapsed time: %f\n", elapsedTime2); printf("\nShortest Path: %i -> ", shortestpath); if(argc > 1) { while(!is_empty(&result)) { printf("%i,", pop(&result)); } printf("\n"); } printf("\nSpeedup: %f\n",
elapsedTime2/elapsedTime); return 0; }
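// The kernels themselves (shortest_path_cuda, shortest_path_cuda_2, shortest_path_cuda_3)
// live in shortestpath_cuda.h and are not part of this file. The launch shapes above --
// first i+1 work items for i = 1..MAT_SIZE-1, then MAT_SIZE-i -- suggest a dynamic-programming
// wavefront that sweeps the matrix one anti-diagonal at a time, growing across the upper-left
// triangle and then shrinking across the lower-right one.
//
// A hypothetical error-check helper in the spirit of cudasafe() (the real one is declared in
// ../include/cuda_util.h and is not shown in this file); illustrative sketch only:
static void check_hip_sketch(hipError_t err, const char *msg) {
  if (err != hipSuccess) {
    fprintf(stderr, "%s failed: %s\n", msg, hipGetErrorString(err));
    exit(EXIT_FAILURE);
  }
}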
39fb630a0df5f8850475890968c990e5dcb68e31.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <math.h> #include <sys/time.h> #define MAT_SIZE 512*28 #define BLOCK_SIZE 512 #define MAX_ELEMENT 12 #include "stack.h" #include "../include/cuda_util.h" // Matrix to be represented as single array typedef struct { int * array; int columns; } Matrix; #include "shortestpath_cuda.h" #include "shortestpath.h" int main(int argc, char**argv) { printf("\nSHORTEST PATH: %i x %i\n\n", MAT_SIZE, MAT_SIZE); cudaEvent_t start, stop; float elapsedTime, elapsedTime2; // Create a matrix and populate it with random data Matrix mat; mat.array = (int*)malloc(MAT_SIZE * MAT_SIZE * sizeof(int)); mat.columns = MAT_SIZE; srand ( time(NULL) ); for(int i = 0; i < MAT_SIZE * MAT_SIZE; i++) { mat.array[i] = rand() % MAX_ELEMENT; } // ######### CUDA ######### printf("CUDA Implementation: "); cudaEventCreate(&start); cudaEventCreate(&stop); // Copy matrix to global memory int *DevMat, *dev_shortest_path, *dev_result_stack; cudasafe( cudaMalloc((void**)&dev_shortest_path, sizeof(int)), "cudaMalloc" ); cudasafe( cudaMalloc((void**)&DevMat, MAT_SIZE * MAT_SIZE * sizeof(int)), "cudaMalloc" ); if(argc > 1) cudasafe( cudaMalloc((void**)&dev_result_stack, MAT_SIZE * 2 * sizeof(int)), "cudaMalloc" ); cudasafe( cudaMemcpy(DevMat, mat.array, MAT_SIZE * MAT_SIZE * sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy" ); // Compute shortest path with gpu int shortestpath = 0; int *result_stack = (int*)malloc(MAT_SIZE * 2 * sizeof(int)); cudaEventRecord(start,0); dim3 threadsPerBlock(BLOCK_SIZE); for(int i = 1; i < MAT_SIZE; i++) { dim3 blocks((int)ceil((float)(i+1) / (float)threadsPerBlock.x)); shortest_path_cuda<<<blocks,threadsPerBlock>>>(DevMat, dev_shortest_path, dev_result_stack, i); } for(int i = 1; i < MAT_SIZE; i++) { dim3 blocks((int)ceil((float)(MAT_SIZE-i) / (float)threadsPerBlock.x)); shortest_path_cuda_2<<<blocks,threadsPerBlock>>>(DevMat, dev_shortest_path, dev_result_stack, i); } if(argc > 1) shortest_path_cuda_3<<<1,1>>>(DevMat, dev_shortest_path, dev_result_stack); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); cudasafe( cudaMemcpy(&shortestpath, dev_shortest_path, sizeof(int), cudaMemcpyDeviceToHost) ,"cudaMemcpy"); if(argc > 1) cudasafe( cudaMemcpy(result_stack, dev_result_stack, MAT_SIZE * 2 * sizeof(int), cudaMemcpyDeviceToHost) ,"cudaMemcpy"); cudasafe( cudaFree(DevMat), "cudaFree" ); if(argc > 1) cudasafe( cudaFree(dev_result_stack), "cudaFree" ); cudasafe( cudaFree(dev_shortest_path), "cudaFree" ); // Print path taken printf("\nelapsed time: %f\n", elapsedTime); printf("\nShortest Path: %i -> ", shortestpath); if(argc > 1) { int i = -1; while(result_stack[++i] >= 0); for(i--; i >= 0; i--) { printf("%i,", result_stack[i]); } printf("\n"); } // ######### CPU Implementation ######### printf("\n\nCPU Implementation: "); // Create a result stack Stack result; stack_init(&result, MAT_SIZE*2); // Compute shortest path with cpu cudaEventRecord(start,0); shortestpath = shortest_path_cpu(&mat, &result); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime2, start, stop); // Print path taken printf("\nelapsed time: %f\n", elapsedTime2); printf("\nShortest Path: %i -> ", shortestpath); if(argc > 1) { while(!is_empty(&result)) { printf("%i,", pop(&result)); } printf("\n"); } printf("\nSpeedup: %f\n", elapsedTime2/elapsedTime); return 0; }
7a18636511be19d3acf933f52b6989619fc81d09.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <chrono> #include <cstdlib> #include <ctime> #include <math.h> #include <iostream> #include <set> #include "utils.hpp" #include "options.hpp" #include "b8.cuh" ///////////////////////////// ///////////////////////////// namespace chrono = std::chrono; using clock_type = chrono::high_resolution_clock; ///////////////////////////// ///////////////////////////// void reset(float *image, float *maximum, float *minimum, int N) { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { image[i * N + j] = 0; } } *maximum = 0; *minimum = 0; } void gaussian_kernel(float* kernel, int diameter, float sigma) { int mean = diameter / 2; float sum_tmp = 0; for (int i = 0; i < diameter; i++) { for (int j = 0; j < diameter; j++) { kernel[i * diameter + j] = exp(-0.5 * ((i - mean) * (i - mean) + (j - mean) * (j - mean)) / (sigma * sigma)); sum_tmp += kernel[i * diameter + j]; } } for (int i = 0; i < diameter; i++) { for (int j = 0; j < diameter; j++) { kernel[i * diameter + j] /= sum_tmp; } } } ///////////////////////////// ///////////////////////////// int main(int argc, char *argv[]) { srand(time(0)); Options options = Options(argc, argv); int debug = options.debug; int num_executions = options.num_iter; int N = options.N; int kernel_small_diameter = 3; int kernel_large_diameter = 5; int kernel_unsharpen_diameter = 3; int block_size_1d = options.block_size_1d; int block_size_2d = options.block_size_2d; int num_blocks = options.num_blocks; int skip_iterations = options.skip_iterations; int err = 0; if (debug) { std::cout << "running b8 default" << std::endl; std::cout << "N=" << N << std::endl; std::cout << "num executions=" << num_executions << std::endl; std::cout << "block size 1d=" << block_size_1d << std::endl; std::cout << "block size 2d=" << block_size_2d << std::endl; std::cout << "num blocks=" << num_blocks << std::endl; std::cout << "skip iteration time=" << skip_iterations << std::endl; } auto start = clock_type::now(); float *image, *image2, *image3, *image_unsharpen, *mask_small, *mask_large, *mask_unsharpen, *blurred_small, *blurred_large, *blurred_unsharpen; float *kernel_small, *kernel_large, *kernel_unsharpen, *maximum, *minimum; err = hipMallocManaged(&image, sizeof(float) * N * N); err = hipMallocManaged(&image2, sizeof(float) * N * N); err = hipMallocManaged(&image3, sizeof(float) * N * N); err = hipMallocManaged(&image_unsharpen, sizeof(float) * N * N); err = hipMallocManaged(&mask_small, sizeof(float) * N * N); err = hipMallocManaged(&mask_large, sizeof(float) * N * N); err = hipMallocManaged(&mask_unsharpen, sizeof(float) * N * N); err = hipMallocManaged(&blurred_small, sizeof(float) * N * N); err = hipMallocManaged(&blurred_large, sizeof(float) * N * N); err = hipMallocManaged(&blurred_unsharpen, sizeof(float) * N * N); err = hipMallocManaged(&kernel_small, sizeof(float) * kernel_small_diameter * kernel_small_diameter); err = hipMallocManaged(&kernel_large, sizeof(float) * kernel_large_diameter * kernel_large_diameter); err = hipMallocManaged(&kernel_unsharpen, sizeof(float) * kernel_unsharpen_diameter * kernel_unsharpen_diameter); err = hipMallocManaged(&maximum, sizeof(float)); err = hipMallocManaged(&minimum, sizeof(float)); if (debug && err) std::cout << err << std::endl; // Initialize arrays; start = clock_type::now(); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { image[i * N + j] = (float)(rand()) / (float)(RAND_MAX); } } gaussian_kernel(kernel_small, 
kernel_small_diameter, 1); gaussian_kernel(kernel_large, kernel_large_diameter, 10); gaussian_kernel(kernel_unsharpen, kernel_unsharpen_diameter, 5); // Create streams; hipStream_t s1, s2, s3, s4, s5; err = hipStreamCreate(&s1); err = hipStreamCreate(&s2); err = hipStreamCreate(&s3); err = hipStreamCreate(&s4); err = hipStreamCreate(&s5); if (err) std::cout << err << std::endl; auto end = clock_type::now(); if (debug) std::cout << "init=" << (float) chrono::duration_cast<chrono::microseconds>(end - start).count() / 1000 << " ms" << std::endl; // Print header; if (!debug) std::cout << "num_iter,gpu_result,total_time_sec,overhead_sec,computation_sec" << std::endl; float tot = 0; for (int i = 0; i < num_executions; i++) { if (debug) std::cout << "\n-- iter=" << i << std::endl; auto start_tmp = clock_type::now(); reset(image3, maximum, minimum, N); auto end_tmp = clock_type::now(); auto reset_time = chrono::duration_cast<chrono::microseconds>(end_tmp - start_tmp).count(); if (debug) std::cout << " reset=" << (float) reset_time / 1000 << " ms" << std::endl; dim3 block_size_2d_dim(block_size_2d, block_size_2d); dim3 grid_size(num_blocks, num_blocks); int nb = num_blocks / 2; dim3 grid_size_2(nb, nb); start = clock_type::now(); hipStreamAttachMemAsync(s1, blurred_small, 0); hipStreamAttachMemAsync(s1, mask_small, 0); hipStreamAttachMemAsync(s2, blurred_large, 0); hipStreamAttachMemAsync(s2, mask_large, 0); hipStreamAttachMemAsync(s3, blurred_unsharpen, 0); hipStreamAttachMemAsync(s3, image_unsharpen, 0); hipStreamAttachMemAsync(s2, image2, 0); hipStreamAttachMemAsync(s1, image3, 0); hipLaunchKernelGGL(( gaussian_blur), dim3(grid_size_2), dim3(block_size_2d_dim), kernel_small_diameter * kernel_small_diameter * sizeof(float), s1, image, blurred_small, N, N, kernel_small, kernel_small_diameter); hipLaunchKernelGGL(( gaussian_blur), dim3(grid_size_2), dim3(block_size_2d_dim), kernel_large_diameter * kernel_large_diameter * sizeof(float), s2, image, blurred_large, N, N, kernel_large, kernel_large_diameter); hipLaunchKernelGGL(( gaussian_blur), dim3(grid_size_2), dim3(block_size_2d_dim), kernel_unsharpen_diameter * kernel_unsharpen_diameter * sizeof(float), s3, image, blurred_unsharpen, N, N, kernel_unsharpen, kernel_unsharpen_diameter); hipLaunchKernelGGL(( sobel), dim3(grid_size_2), dim3(block_size_2d_dim), 0, s1, blurred_small, mask_small, N, N); hipLaunchKernelGGL(( sobel), dim3(grid_size_2), dim3(block_size_2d_dim), 0, s2, blurred_large, mask_large, N, N); hipEvent_t e1, e2, e3, e4, e5; hipEventCreate(&e1); hipEventCreate(&e2); hipEventCreate(&e3); hipEventCreate(&e4); hipEventCreate(&e5); hipEventRecord(e1, s2); hipStreamWaitEvent(s5, e1, 0); hipLaunchKernelGGL(( maximum_kernel), dim3(num_blocks), dim3(block_size_1d), 0, s5, mask_large, maximum, N * N); hipStreamWaitEvent(s4, e1, 0); hipLaunchKernelGGL(( minimum_kernel), dim3(num_blocks), dim3(block_size_1d), 0, s4, mask_large, minimum, N * N); hipEventRecord(e2, s4); hipEventRecord(e5, s5); hipStreamWaitEvent(s2, e2, 0); hipStreamWaitEvent(s2, e5, 0); hipLaunchKernelGGL(( extend), dim3(num_blocks), dim3(block_size_1d), 0, s2, mask_large, minimum, maximum, N * N); hipLaunchKernelGGL(( unsharpen), dim3(num_blocks), dim3(block_size_1d), 0, s3, image, blurred_unsharpen, image_unsharpen, 0.5, N * N); hipEventRecord(e3, s3); hipStreamWaitEvent(s2, e3, 0); hipLaunchKernelGGL(( combine), dim3(num_blocks), dim3(block_size_1d), 0, s2, image_unsharpen, blurred_large, mask_large, image2, N * N); hipEventRecord(e4, s2); hipStreamWaitEvent(s1, e4, 0); 
hipStreamAttachMemAsync(s1, image2, 0); hipLaunchKernelGGL(( combine), dim3(num_blocks), dim3(block_size_1d), 0, s1, image2, blurred_small, mask_small, image3, N * N); // Extra // hipEventRecord(e1, s2); // hipEventRecord(e2, s3); // hipStreamWaitEvent(s1, e1, 0); // hipStreamWaitEvent(s1, e2, 0); //hipLaunchKernelGGL(( combine), dim3(num_blocks), dim3(block_size_1d), 0, s1, blurred_small, blurred_large, blurred_unsharpen, image3, N * N); hipStreamSynchronize(s1); end = clock_type::now(); auto tmp = chrono::duration_cast<chrono::microseconds>(end - start).count(); if (i >= skip_iterations) tot += tmp; if (debug) { std::cout << " gpu result=["; for (int j = 0; j < 10; j++) { std::cout << image3[j] << ", "; } std::cout << ", ...]; time=" << (float) tmp / 1000 << " ms" << std::endl; } else { std::cout << i << "," << 0.0 << "," << (float) (reset_time + tmp) / 1e6 << "," << (float) reset_time / 1e6 << "," << (float) tmp / 1e6 << std::endl; } } // Print; hipDeviceSynchronize(); if (debug) std::cout << "\nmean exec time=" << (float) tot / (1000 * (num_executions - skip_iterations)) << " ms" << std::endl; }
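// Stream/event dependency graph built above (read directly from the code, for reference):
//
//   s1: gaussian_blur(small) -> sobel(small) ............. waits e4 -> combine -> image3
//   s2: gaussian_blur(large) -> sobel(large) -[e1]-> waits e2,e5 -> extend(mask_large)
//       -> waits e3 -> combine(image_unsharpen, blurred_large, mask_large) -> image2 -[e4]
//   s3: gaussian_blur(unsharpen) -> unsharpen -> image_unsharpen -[e3]
//   s4: waits e1 -> minimum_kernel(mask_large) -[e2]
//   s5: waits e1 -> maximum_kernel(mask_large) -[e5]
//
// Only s1 is synchronized at the end of each iteration; that is enough because the final
// combine on s1 transitively depends on every other stream through the events above.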
7a18636511be19d3acf933f52b6989619fc81d09.cu
#include <chrono> #include <cstdlib> #include <ctime> #include <math.h> #include <iostream> #include <set> #include "utils.hpp" #include "options.hpp" #include "b8.cuh" ///////////////////////////// ///////////////////////////// namespace chrono = std::chrono; using clock_type = chrono::high_resolution_clock; ///////////////////////////// ///////////////////////////// void reset(float *image, float *maximum, float *minimum, int N) { for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { image[i * N + j] = 0; } } *maximum = 0; *minimum = 0; } void gaussian_kernel(float* kernel, int diameter, float sigma) { int mean = diameter / 2; float sum_tmp = 0; for (int i = 0; i < diameter; i++) { for (int j = 0; j < diameter; j++) { kernel[i * diameter + j] = exp(-0.5 * ((i - mean) * (i - mean) + (j - mean) * (j - mean)) / (sigma * sigma)); sum_tmp += kernel[i * diameter + j]; } } for (int i = 0; i < diameter; i++) { for (int j = 0; j < diameter; j++) { kernel[i * diameter + j] /= sum_tmp; } } } ///////////////////////////// ///////////////////////////// int main(int argc, char *argv[]) { srand(time(0)); Options options = Options(argc, argv); int debug = options.debug; int num_executions = options.num_iter; int N = options.N; int kernel_small_diameter = 3; int kernel_large_diameter = 5; int kernel_unsharpen_diameter = 3; int block_size_1d = options.block_size_1d; int block_size_2d = options.block_size_2d; int num_blocks = options.num_blocks; int skip_iterations = options.skip_iterations; int err = 0; if (debug) { std::cout << "running b8 default" << std::endl; std::cout << "N=" << N << std::endl; std::cout << "num executions=" << num_executions << std::endl; std::cout << "block size 1d=" << block_size_1d << std::endl; std::cout << "block size 2d=" << block_size_2d << std::endl; std::cout << "num blocks=" << num_blocks << std::endl; std::cout << "skip iteration time=" << skip_iterations << std::endl; } auto start = clock_type::now(); float *image, *image2, *image3, *image_unsharpen, *mask_small, *mask_large, *mask_unsharpen, *blurred_small, *blurred_large, *blurred_unsharpen; float *kernel_small, *kernel_large, *kernel_unsharpen, *maximum, *minimum; err = cudaMallocManaged(&image, sizeof(float) * N * N); err = cudaMallocManaged(&image2, sizeof(float) * N * N); err = cudaMallocManaged(&image3, sizeof(float) * N * N); err = cudaMallocManaged(&image_unsharpen, sizeof(float) * N * N); err = cudaMallocManaged(&mask_small, sizeof(float) * N * N); err = cudaMallocManaged(&mask_large, sizeof(float) * N * N); err = cudaMallocManaged(&mask_unsharpen, sizeof(float) * N * N); err = cudaMallocManaged(&blurred_small, sizeof(float) * N * N); err = cudaMallocManaged(&blurred_large, sizeof(float) * N * N); err = cudaMallocManaged(&blurred_unsharpen, sizeof(float) * N * N); err = cudaMallocManaged(&kernel_small, sizeof(float) * kernel_small_diameter * kernel_small_diameter); err = cudaMallocManaged(&kernel_large, sizeof(float) * kernel_large_diameter * kernel_large_diameter); err = cudaMallocManaged(&kernel_unsharpen, sizeof(float) * kernel_unsharpen_diameter * kernel_unsharpen_diameter); err = cudaMallocManaged(&maximum, sizeof(float)); err = cudaMallocManaged(&minimum, sizeof(float)); if (debug && err) std::cout << err << std::endl; // Initialize arrays; start = clock_type::now(); for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { image[i * N + j] = (float)(rand()) / (float)(RAND_MAX); } } gaussian_kernel(kernel_small, kernel_small_diameter, 1); gaussian_kernel(kernel_large, kernel_large_diameter, 10); 
gaussian_kernel(kernel_unsharpen, kernel_unsharpen_diameter, 5); // Create streams; cudaStream_t s1, s2, s3, s4, s5; err = cudaStreamCreate(&s1); err = cudaStreamCreate(&s2); err = cudaStreamCreate(&s3); err = cudaStreamCreate(&s4); err = cudaStreamCreate(&s5); if (err) std::cout << err << std::endl; auto end = clock_type::now(); if (debug) std::cout << "init=" << (float) chrono::duration_cast<chrono::microseconds>(end - start).count() / 1000 << " ms" << std::endl; // Print header; if (!debug) std::cout << "num_iter,gpu_result,total_time_sec,overhead_sec,computation_sec" << std::endl; float tot = 0; for (int i = 0; i < num_executions; i++) { if (debug) std::cout << "\n-- iter=" << i << std::endl; auto start_tmp = clock_type::now(); reset(image3, maximum, minimum, N); auto end_tmp = clock_type::now(); auto reset_time = chrono::duration_cast<chrono::microseconds>(end_tmp - start_tmp).count(); if (debug) std::cout << " reset=" << (float) reset_time / 1000 << " ms" << std::endl; dim3 block_size_2d_dim(block_size_2d, block_size_2d); dim3 grid_size(num_blocks, num_blocks); int nb = num_blocks / 2; dim3 grid_size_2(nb, nb); start = clock_type::now(); cudaStreamAttachMemAsync(s1, blurred_small, 0); cudaStreamAttachMemAsync(s1, mask_small, 0); cudaStreamAttachMemAsync(s2, blurred_large, 0); cudaStreamAttachMemAsync(s2, mask_large, 0); cudaStreamAttachMemAsync(s3, blurred_unsharpen, 0); cudaStreamAttachMemAsync(s3, image_unsharpen, 0); cudaStreamAttachMemAsync(s2, image2, 0); cudaStreamAttachMemAsync(s1, image3, 0); gaussian_blur<<<grid_size_2, block_size_2d_dim, kernel_small_diameter * kernel_small_diameter * sizeof(float), s1>>>(image, blurred_small, N, N, kernel_small, kernel_small_diameter); gaussian_blur<<<grid_size_2, block_size_2d_dim, kernel_large_diameter * kernel_large_diameter * sizeof(float), s2>>>(image, blurred_large, N, N, kernel_large, kernel_large_diameter); gaussian_blur<<<grid_size_2, block_size_2d_dim, kernel_unsharpen_diameter * kernel_unsharpen_diameter * sizeof(float), s3>>>(image, blurred_unsharpen, N, N, kernel_unsharpen, kernel_unsharpen_diameter); sobel<<<grid_size_2, block_size_2d_dim, 0, s1>>>(blurred_small, mask_small, N, N); sobel<<<grid_size_2, block_size_2d_dim, 0, s2>>>(blurred_large, mask_large, N, N); cudaEvent_t e1, e2, e3, e4, e5; cudaEventCreate(&e1); cudaEventCreate(&e2); cudaEventCreate(&e3); cudaEventCreate(&e4); cudaEventCreate(&e5); cudaEventRecord(e1, s2); cudaStreamWaitEvent(s5, e1, 0); maximum_kernel<<<num_blocks, block_size_1d, 0, s5>>>(mask_large, maximum, N * N); cudaStreamWaitEvent(s4, e1, 0); minimum_kernel<<<num_blocks, block_size_1d, 0, s4>>>(mask_large, minimum, N * N); cudaEventRecord(e2, s4); cudaEventRecord(e5, s5); cudaStreamWaitEvent(s2, e2, 0); cudaStreamWaitEvent(s2, e5, 0); extend<<<num_blocks, block_size_1d, 0, s2>>>(mask_large, minimum, maximum, N * N); unsharpen<<<num_blocks, block_size_1d, 0, s3>>>(image, blurred_unsharpen, image_unsharpen, 0.5, N * N); cudaEventRecord(e3, s3); cudaStreamWaitEvent(s2, e3, 0); combine<<<num_blocks, block_size_1d, 0, s2>>>(image_unsharpen, blurred_large, mask_large, image2, N * N); cudaEventRecord(e4, s2); cudaStreamWaitEvent(s1, e4, 0); cudaStreamAttachMemAsync(s1, image2, 0); combine<<<num_blocks, block_size_1d, 0, s1>>>(image2, blurred_small, mask_small, image3, N * N); // Extra // cudaEventRecord(e1, s2); // cudaEventRecord(e2, s3); // cudaStreamWaitEvent(s1, e1, 0); // cudaStreamWaitEvent(s1, e2, 0); // combine<<<num_blocks, block_size_1d, 0, s1>>>(blurred_small, blurred_large, blurred_unsharpen, 
image3, N * N); cudaStreamSynchronize(s1); end = clock_type::now(); auto tmp = chrono::duration_cast<chrono::microseconds>(end - start).count(); if (i >= skip_iterations) tot += tmp; if (debug) { std::cout << " gpu result=["; for (int j = 0; j < 10; j++) { std::cout << image3[j] << ", "; } std::cout << ", ...]; time=" << (float) tmp / 1000 << " ms" << std::endl; } else { std::cout << i << "," << 0.0 << "," << (float) (reset_time + tmp) / 1e6 << "," << (float) reset_time / 1e6 << "," << (float) tmp / 1e6 << std::endl; } } // Print; cudaDeviceSynchronize(); if (debug) std::cout << "\nmean exec time=" << (float) tot / (1000 * (num_executions - skip_iterations)) << " ms" << std::endl; }
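// Observation: e1..e5 are created with cudaEventCreate() on every pass through the benchmark
// loop but never destroyed, so each iteration appears to leak five events. A minimal cleanup
// sketch (not in the original), which would go right after the cudaStreamSynchronize(s1)
// call inside the loop:
//
//   cudaEventDestroy(e1); cudaEventDestroy(e2); cudaEventDestroy(e3);
//   cudaEventDestroy(e4); cudaEventDestroy(e5);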
ebbe413d24f2e9776919b31de9ee597849ea4f31.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28) { for (int i=0; i < var_1; ++i) { for (int i=0; i < var_2; ++i) { if (comp == sinf(var_3 / +0.0f * var_4 / -1.0418E0f / -1.6437E-36f + +0.0f)) { comp += var_5 / (+0.0f + (+1.3830E14f - asinf((+0.0f / ceilf((var_6 * var_7 + (var_8 - (+1.9576E-41f * +1.2823E-43f + var_9)))))))); comp = var_10 + +1.9929E-6f; comp += var_11 / var_12 * (var_13 / var_14 * (-0.0f / var_15)); if (comp < (var_16 - (+0.0f + atan2f((var_17 + (-0.0f + (var_18 - var_19 + +1.0240E-41f))), (var_20 / var_21))))) { comp += var_22 - -1.9692E-35f - var_23 + (var_24 / cosf(var_25 + var_26)); comp += -1.0851E-37f / (var_27 + (+1.8965E2f / var_28)); } } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); float tmp_25 = atof(argv[25]); float tmp_26 = atof(argv[26]); float tmp_27 = atof(argv[27]); float tmp_28 = atof(argv[28]); float tmp_29 = atof(argv[29]); hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29); hipDeviceSynchronize(); return 0; }
ebbe413d24f2e9776919b31de9ee597849ea4f31.cu
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28) { for (int i=0; i < var_1; ++i) { for (int i=0; i < var_2; ++i) { if (comp == sinf(var_3 / +0.0f * var_4 / -1.0418E0f / -1.6437E-36f + +0.0f)) { comp += var_5 / (+0.0f + (+1.3830E14f - asinf((+0.0f / ceilf((var_6 * var_7 + (var_8 - (+1.9576E-41f * +1.2823E-43f + var_9)))))))); comp = var_10 + +1.9929E-6f; comp += var_11 / var_12 * (var_13 / var_14 * (-0.0f / var_15)); if (comp < (var_16 - (+0.0f + atan2f((var_17 + (-0.0f + (var_18 - var_19 + +1.0240E-41f))), (var_20 / var_21))))) { comp += var_22 - -1.9692E-35f - var_23 + (var_24 / cosf(var_25 + var_26)); comp += -1.0851E-37f / (var_27 + (+1.8965E2f / var_28)); } } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); float tmp_25 = atof(argv[25]); float tmp_26 = atof(argv[26]); float tmp_27 = atof(argv[27]); float tmp_28 = atof(argv[28]); float tmp_29 = atof(argv[29]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29); cudaDeviceSynchronize(); return 0; }
b050052f6a5a5ee1cf2c89513988825bd15af06a.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <cstdio> #include <hip/hip_runtime.h> #include "util.h" #include "CudaStream.h" #include "CudaEvent.h" // 2D diffusion example // the grid has a fixed width of nx=128 // the user specifies the height, ny, as a power of two // note that nx and ny have 2 added to them to account for halos template <typename T> void fill_gpu(T *v, T value, int n); void write_to_file(int nx, int ny, double* data); __global__ void diffusion(double *x0, double *x1, int nx, int ny, double dt) { auto i = threadIdx.x + blockDim.x*blockIdx.x; auto j = threadIdx.y + blockDim.y*blockIdx.y; if(i<nx-2 && j<ny-2) { auto pos = i+1 + (j+1)*nx; x1[pos] = x0[pos] + dt * (-4.*x0[pos] + x0[pos-nx] + x0[pos+nx] + x0[pos-1] + x0[pos+1]); } } int main(int argc, char** argv) { // set up parameters // first argument is the y dimension = 2^arg size_t pow = read_arg(argc, argv, 1, 8); // second argument is the number of time steps size_t nsteps = read_arg(argc, argv, 2, 100); // set domain size size_t nx = 128+2; size_t ny = (1 << pow)+2; double dt = 0.1; std::cout << "\n## " << nx << "x" << ny << " for " << nsteps << " time steps" << " (" << nx*ny << " grid points)" << std::endl; // allocate memory on device and host // note : allocate enough memory for the halo around the boundary auto buffer_size = nx*ny; double *x_host = malloc_host_pinned<double>(buffer_size); double *x0 = malloc_device<double>(buffer_size); double *x1 = malloc_device<double>(buffer_size); // set initial conditions of 0 everywhere fill_gpu(x0, 0., buffer_size); fill_gpu(x1, 0., buffer_size); // set boundary conditions of 1 on south border fill_gpu(x0, 1., nx); fill_gpu(x1, 1., nx); fill_gpu(x0+nx*(ny-1), 1., nx); fill_gpu(x1+nx*(ny-1), 1., nx); CudaStream stream; CudaStream copy_stream(true); auto start_event = stream.enqueue_event(); dim3 block_dim(8,8); dim3 grid_dim((nx-2)/block_dim.x, (ny-2)/block_dim.y); // time stepping loop for(auto step=0; step<nsteps; ++step) { hipLaunchKernelGGL(( diffusion), dim3(grid_dim), dim3(block_dim), 0, 0, x0, x1, nx, ny, dt); std::swap(x0, x1); } auto stop_event = stream.enqueue_event(); stop_event.wait(); copy_to_host<double>(x0, x_host, buffer_size); double time = stop_event.time_since(start_event); std::cout << "## " << time << "s, " << nsteps*(nx-2)*(ny-2) / time << " points/second" << std::endl << std::endl; std::cout << "writing to output.bin/bov" << std::endl; write_to_file(nx, ny, x_host); return 0; } template <typename T> __global__ void fill(T *v, T value, int n) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if(tid<n) { v[tid] = value; } } template <typename T> void fill_gpu(T *v, T value, int n) { auto block_dim = 192ul; auto grid_dim = n/block_dim + (n%block_dim ? 1 : 0); hipLaunchKernelGGL(( fill<T>), dim3(grid_dim), dim3(block_dim), 0, 0, v, value, n); } void write_to_file(int nx, int ny, double* data) { { FILE* output = fopen("output.bin", "w"); fwrite(data, sizeof(double), nx * ny, output); fclose(output); } std::ofstream fid("output.bov"); fid << "TIME: 0.0" << std::endl; fid << "DATA_FILE: output.bin" << std::endl; fid << "DATA_SIZE: " << nx << ", " << ny << ", 1" << std::endl; fid << "DATA_FORMAT: DOUBLE" << std::endl; fid << "VARIABLE: phi" << std::endl; fid << "DATA_ENDIAN: LITTLE" << std::endl; fid << "CENTERING: nodal" << std::endl; fid << "BRICK_SIZE: 1.0 1.0 1.0" << std::endl; }
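// The update in diffusion() is one forward-Euler step of the 2D heat equation on a
// unit-spaced grid: x1 = x0 + dt * (north + south + east + west - 4*center), applied only to
// the (nx-2) x (ny-2) interior; the +1 offsets skip the one-cell halo, and the halo rows at
// j = 0 and j = ny-1 are filled with 1 as boundary conditions. For unit spacing and unit
// diffusivity the usual stability bound for this explicit scheme is dt <= 0.25, so the
// hard-coded dt = 0.1 sits inside it.
//
// A hypothetical helper that just spells out the halo indexing used above (illustrative
// only; not called by the original code):
static inline int interior_index_sketch(int i, int j, int nx) {
  // interior cell (i, j), with 0 <= i < nx-2 and 0 <= j < ny-2, in the padded nx-wide array
  return (i + 1) + (j + 1) * nx;
}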
b050052f6a5a5ee1cf2c89513988825bd15af06a.cu
#include <iostream> #include <fstream> #include <cstdio> #include <cuda.h> #include "util.h" #include "CudaStream.h" #include "CudaEvent.h" // 2D diffusion example // the grid has a fixed width of nx=128 // the user specifies the height, ny, as a power of two // note that nx and ny have 2 added to them to account for halos template <typename T> void fill_gpu(T *v, T value, int n); void write_to_file(int nx, int ny, double* data); __global__ void diffusion(double *x0, double *x1, int nx, int ny, double dt) { auto i = threadIdx.x + blockDim.x*blockIdx.x; auto j = threadIdx.y + blockDim.y*blockIdx.y; if(i<nx-2 && j<ny-2) { auto pos = i+1 + (j+1)*nx; x1[pos] = x0[pos] + dt * (-4.*x0[pos] + x0[pos-nx] + x0[pos+nx] + x0[pos-1] + x0[pos+1]); } } int main(int argc, char** argv) { // set up parameters // first argument is the y dimension = 2^arg size_t pow = read_arg(argc, argv, 1, 8); // second argument is the number of time steps size_t nsteps = read_arg(argc, argv, 2, 100); // set domain size size_t nx = 128+2; size_t ny = (1 << pow)+2; double dt = 0.1; std::cout << "\n## " << nx << "x" << ny << " for " << nsteps << " time steps" << " (" << nx*ny << " grid points)" << std::endl; // allocate memory on device and host // note : allocate enough memory for the halo around the boundary auto buffer_size = nx*ny; double *x_host = malloc_host_pinned<double>(buffer_size); double *x0 = malloc_device<double>(buffer_size); double *x1 = malloc_device<double>(buffer_size); // set initial conditions of 0 everywhere fill_gpu(x0, 0., buffer_size); fill_gpu(x1, 0., buffer_size); // set boundary conditions of 1 on south border fill_gpu(x0, 1., nx); fill_gpu(x1, 1., nx); fill_gpu(x0+nx*(ny-1), 1., nx); fill_gpu(x1+nx*(ny-1), 1., nx); CudaStream stream; CudaStream copy_stream(true); auto start_event = stream.enqueue_event(); dim3 block_dim(8,8); dim3 grid_dim((nx-2)/block_dim.x, (ny-2)/block_dim.y); // time stepping loop for(auto step=0; step<nsteps; ++step) { diffusion<<<grid_dim, block_dim>>>(x0, x1, nx, ny, dt); std::swap(x0, x1); } auto stop_event = stream.enqueue_event(); stop_event.wait(); copy_to_host<double>(x0, x_host, buffer_size); double time = stop_event.time_since(start_event); std::cout << "## " << time << "s, " << nsteps*(nx-2)*(ny-2) / time << " points/second" << std::endl << std::endl; std::cout << "writing to output.bin/bov" << std::endl; write_to_file(nx, ny, x_host); return 0; } template <typename T> __global__ void fill(T *v, T value, int n) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if(tid<n) { v[tid] = value; } } template <typename T> void fill_gpu(T *v, T value, int n) { auto block_dim = 192ul; auto grid_dim = n/block_dim + (n%block_dim ? 1 : 0); fill<T><<<grid_dim, block_dim>>>(v, value, n); } void write_to_file(int nx, int ny, double* data) { { FILE* output = fopen("output.bin", "w"); fwrite(data, sizeof(double), nx * ny, output); fclose(output); } std::ofstream fid("output.bov"); fid << "TIME: 0.0" << std::endl; fid << "DATA_FILE: output.bin" << std::endl; fid << "DATA_SIZE: " << nx << ", " << ny << ", 1" << std::endl; fid << "DATA_FORMAT: DOUBLE" << std::endl; fid << "VARIABLE: phi" << std::endl; fid << "DATA_ENDIAN: LITTLE" << std::endl; fid << "CENTERING: nodal" << std::endl; fid << "BRICK_SIZE: 1.0 1.0 1.0" << std::endl; }
2fb3d4c00c3d6a04d79e4a44a8731ea939f97a8e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void MinusMeanKernel (double *Dens, double *Energy, double SigmaMed, double mean_dens_r, double mean_dens_r2, double mean_energy_r,double mean_energy_r2, double EnergyMed, int nsec, int nrad, double SigmaMed2, double EnergyMed2) { int j = threadIdx.x + blockDim.x*blockIdx.x; int i = 0; if (j< nsec){ Dens[i*nsec + j] += SigmaMed - mean_dens_r; Energy[i*nsec + j] += EnergyMed - mean_energy_r; } i = nrad-1; if (j < nsec){ Dens[i*nsec + j] += SigmaMed2 - mean_dens_r2; Energy[i*nsec + j] += EnergyMed2 - mean_energy_r2; } }
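// The kernel only touches the innermost (i = 0) and outermost (i = nrad-1) radial rings, so a
// 1-D launch with one thread per azimuthal cell (nsec threads in total) covers all the work.
// Below is a hypothetical host-side launch sketch, not part of the original file; the block
// size of 256 is an arbitrary illustrative choice.
static void MinusMeanLaunchSketch(double *Dens, double *Energy, double SigmaMed,
                                  double mean_dens_r, double mean_dens_r2,
                                  double mean_energy_r, double mean_energy_r2,
                                  double EnergyMed, int nsec, int nrad,
                                  double SigmaMed2, double EnergyMed2) {
  dim3 block(256);
  dim3 grid((nsec + block.x - 1) / block.x); // enough blocks to give one thread per j < nsec
  hipLaunchKernelGGL(MinusMeanKernel, grid, block, 0, 0,
                     Dens, Energy, SigmaMed, mean_dens_r, mean_dens_r2,
                     mean_energy_r, mean_energy_r2, EnergyMed, nsec, nrad,
                     SigmaMed2, EnergyMed2);
}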
2fb3d4c00c3d6a04d79e4a44a8731ea939f97a8e.cu
#include "includes.h" __global__ void MinusMeanKernel (double *Dens, double *Energy, double SigmaMed, double mean_dens_r, double mean_dens_r2, double mean_energy_r,double mean_energy_r2, double EnergyMed, int nsec, int nrad, double SigmaMed2, double EnergyMed2) { int j = threadIdx.x + blockDim.x*blockIdx.x; int i = 0; if (j< nsec){ Dens[i*nsec + j] += SigmaMed - mean_dens_r; Energy[i*nsec + j] += EnergyMed - mean_energy_r; } i = nrad-1; if (j < nsec){ Dens[i*nsec + j] += SigmaMed2 - mean_dens_r2; Energy[i*nsec + j] += EnergyMed2 - mean_energy_r2; } }
63c2d84cbb62476332dd0c60be432d4108eeda95.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Defines the basic matrix operations for the AIJ (compressed row) matrix storage format using the CUSPARSE library, */ #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <petscconf.h> #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ #include <../src/mat/impls/sbaij/seq/sbaij.h> #include <../src/vec/vec/impls/dvecimpl.h> #include <petsc/private/vecimpl.h> #undef VecType #include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h> #include <thrust/adjacent_difference.h> #if PETSC_CPP_VERSION >= 14 #define PETSC_HAVE_THRUST_ASYNC 1 // thrust::for_each(thrust::hip::par.on()) requires C++14 #include <thrust/async/for_each.h> #endif #include <thrust/iterator/constant_iterator.h> #include <thrust/remove.h> #include <thrust/sort.h> #include <thrust/unique.h> const char *const MatCUSPARSEStorageFormats[] = {"CSR", "ELL", "HYB", "MatCUSPARSEStorageFormat", "MAT_CUSPARSE_", 0}; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) /* The following are copied from hipsparse.h in CUDA-11.0. In MatCUSPARSESpMVAlgorithms[] etc, we copy them in 0-based integer value order, since we want to use PetscOptionsEnum() to parse user command line options for them. typedef enum { HIPSPARSE_MV_ALG_DEFAULT = 0, HIPSPARSE_COOMV_ALG = 1, HIPSPARSE_CSRMV_ALG1 = 2, HIPSPARSE_CSRMV_ALG2 = 3 } hipsparseSpMVAlg_t; typedef enum { HIPSPARSE_MM_ALG_DEFAULT CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_ALG_DEFAULT) = 0, HIPSPARSE_COOMM_ALG1 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_SPMM_COO_ALG1) = 1, HIPSPARSE_COOMM_ALG2 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_SPMM_COO_ALG2) = 2, HIPSPARSE_COOMM_ALG3 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG3) = 3, HIPSPARSE_CSRMM_ALG1 CUSPARSE_DEPRECATED_ENUM(HIPSPARSE_CSRMM_ALG1) = 4, CUSPARSE_SPMM_ALG_DEFAULT = 0, HIPSPARSE_SPMM_COO_ALG1 = 1, HIPSPARSE_SPMM_COO_ALG2 = 2, CUSPARSE_SPMM_COO_ALG3 = 3, CUSPARSE_SPMM_COO_ALG4 = 5, HIPSPARSE_CSRMM_ALG1 = 4, CUSPARSE_SPMM_CSR_ALG2 = 6, } hipsparseSpMMAlg_t; typedef enum { HIPSPARSE_CSR2CSC_ALG1 = 1, // faster than V2 (in general), deterministic HIPSPARSE_CSR2CSC_ALG2 = 2 // low memory requirement, non-deterministic } hipsparseCsr2CscAlg_t; */ const char *const MatCUSPARSESpMVAlgorithms[] = {"MV_ALG_DEFAULT", "COOMV_ALG", "CSRMV_ALG1", "CSRMV_ALG2", "hipsparseSpMVAlg_t", "CUSPARSE_", 0}; const char *const MatCUSPARSESpMMAlgorithms[] = {"ALG_DEFAULT", "COO_ALG1", "COO_ALG2", "COO_ALG3", "CSR_ALG1", "COO_ALG4", "CSR_ALG2", "hipsparseSpMMAlg_t", "CUSPARSE_SPMM_", 0}; const char *const MatCUSPARSECsr2CscAlgorithms[] = {"INVALID" /*cusparse does not have enum 0! 
We created one*/, "ALG1", "ALG2", "hipsparseCsr2CscAlg_t", "CUSPARSE_CSR2CSC_", 0}; #endif static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, const MatFactorInfo *); static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, const MatFactorInfo *); static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat, Mat, const MatFactorInfo *); static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, IS, const MatFactorInfo *); static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, IS, const MatFactorInfo *); static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat, Mat, const MatFactorInfo *); static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat, Vec, Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat, Vec, Vec); static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(Mat, PetscOptionItems *PetscOptionsObject); static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat, PetscScalar, Mat, MatStructure); static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat, PetscScalar); static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec); static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec); static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec); static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec, PetscBool, PetscBool); static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **); static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **); static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **, MatCUSPARSEStorageFormat); static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors **); static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **); static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat); static PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat, PetscBool); static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat, PetscInt, const PetscInt[], PetscScalar[]); static PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat, PetscCount, PetscInt[], PetscInt[]); static PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat, const PetscScalar[], InsertMode); PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A, MatCUSPARSEFormatOperation op, MatCUSPARSEStorageFormat format) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscFunctionBegin; switch (op) { case MAT_CUSPARSE_MULT: cusparsestruct->format = format; break; case MAT_CUSPARSE_ALL: cusparsestruct->format = format; break; default: SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.", op); } PetscFunctionReturn(PETSC_SUCCESS); } /*@ MatCUSPARSESetFormat - Sets the storage format of `MATSEQCUSPARSE` matrices for a particular operation. Only the `MatMult()` operation can use different GPU storage formats Not Collective Input Parameters: + A - Matrix of type `MATSEQAIJCUSPARSE` . op - `MatCUSPARSEFormatOperation`. 
`MATSEQAIJCUSPARSE` matrices support `MAT_CUSPARSE_MULT` and `MAT_CUSPARSE_ALL`. `MATMPIAIJCUSPARSE` matrices support `MAT_CUSPARSE_MULT_DIAG`,`MAT_CUSPARSE_MULT_OFFDIAG`, and `MAT_CUSPARSE_ALL`. - format - `MatCUSPARSEStorageFormat` (one of `MAT_CUSPARSE_CSR`, `MAT_CUSPARSE_ELL`, `MAT_CUSPARSE_HYB`.) Level: intermediate .seealso: [](chapter_matrices), `Mat`, `Mat`, `MATSEQAIJCUSPARSE`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation` @*/ PetscErrorCode MatCUSPARSESetFormat(Mat A, MatCUSPARSEFormatOperation op, MatCUSPARSEStorageFormat format) { PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscTryMethod(A, "MatCUSPARSESetFormat_C", (Mat, MatCUSPARSEFormatOperation, MatCUSPARSEStorageFormat), (A, op, format)); PetscFunctionReturn(PETSC_SUCCESS); } PETSC_INTERN PetscErrorCode MatCUSPARSESetUseCPUSolve_SeqAIJCUSPARSE(Mat A, PetscBool use_cpu) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscFunctionBegin; cusparsestruct->use_cpu_solve = use_cpu; PetscFunctionReturn(PETSC_SUCCESS); } /*@ MatCUSPARSESetUseCPUSolve - Sets to use CPU `MatSolve()`. Input Parameters: + A - Matrix of type `MATSEQAIJCUSPARSE` - use_cpu - set flag for using the built-in CPU `MatSolve()` Level: intermediate Note: The cuSparse LU solver currently computes the factors with the built-in CPU method and moves the factors to the GPU for the solve. We have observed better performance keeping the data on the CPU and computing the solve there. This method to specify if the solve is done on the CPU or GPU (GPU is the default). .seealso: [](chapter_matrices), `Mat`, `MatSolve()`, `MATSEQAIJCUSPARSE`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation` @*/ PetscErrorCode MatCUSPARSESetUseCPUSolve(Mat A, PetscBool use_cpu) { PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscTryMethod(A, "MatCUSPARSESetUseCPUSolve_C", (Mat, PetscBool), (A, use_cpu)); PetscFunctionReturn(PETSC_SUCCESS); } PetscErrorCode MatSetOption_SeqAIJCUSPARSE(Mat A, MatOption op, PetscBool flg) { PetscFunctionBegin; switch (op) { case MAT_FORM_EXPLICIT_TRANSPOSE: /* need to destroy the transpose matrix if present to prevent from logic errors if flg is set to true later */ if (A->form_explicit_transpose && !flg) PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE)); A->form_explicit_transpose = flg; break; default: PetscCall(MatSetOption_SeqAIJ(A, op, flg)); break; } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A); static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B, Mat A, const MatFactorInfo *info) { Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data; IS isrow = b->row, iscol = b->col; PetscBool row_identity, col_identity; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)B->spptr; PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A)); PetscCall(MatLUFactorNumeric_SeqAIJ(B, A, info)); B->offloadmask = PETSC_OFFLOAD_CPU; /* determine which version of MatSolve needs to be used. 
*/ PetscCall(ISIdentity(isrow, &row_identity)); PetscCall(ISIdentity(iscol, &col_identity)); if (!cusparsestruct->use_cpu_solve) { if (row_identity && col_identity) { B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; } else { B->ops->solve = MatSolve_SeqAIJCUSPARSE; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; } } B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; /* get the triangular factors */ if (!cusparsestruct->use_cpu_solve) PetscCall(MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(Mat A, PetscOptionItems *PetscOptionsObject) { MatCUSPARSEStorageFormat format; PetscBool flg; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscFunctionBegin; PetscOptionsHeadBegin(PetscOptionsObject, "SeqAIJCUSPARSE options"); if (A->factortype == MAT_FACTOR_NONE) { PetscCall(PetscOptionsEnum("-mat_cusparse_mult_storage_format", "sets storage format of (seq)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat", MatCUSPARSEStorageFormats, (PetscEnum)cusparsestruct->format, (PetscEnum *)&format, &flg)); if (flg) PetscCall(MatCUSPARSESetFormat(A, MAT_CUSPARSE_MULT, format)); PetscCall(PetscOptionsEnum("-mat_cusparse_storage_format", "sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve", "MatCUSPARSESetFormat", MatCUSPARSEStorageFormats, (PetscEnum)cusparsestruct->format, (PetscEnum *)&format, &flg)); if (flg) PetscCall(MatCUSPARSESetFormat(A, MAT_CUSPARSE_ALL, format)); PetscCall(PetscOptionsBool("-mat_cusparse_use_cpu_solve", "Use CPU (I)LU solve", "MatCUSPARSESetUseCPUSolve", cusparsestruct->use_cpu_solve, &cusparsestruct->use_cpu_solve, &flg)); if (flg) PetscCall(MatCUSPARSESetUseCPUSolve(A, cusparsestruct->use_cpu_solve)); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCall(PetscOptionsEnum("-mat_cusparse_spmv_alg", "sets cuSPARSE algorithm used in sparse-mat dense-vector multiplication (SpMV)", "hipsparseSpMVAlg_t", MatCUSPARSESpMVAlgorithms, (PetscEnum)cusparsestruct->spmvAlg, (PetscEnum *)&cusparsestruct->spmvAlg, &flg)); /* If user did use this option, check its consistency with cuSPARSE, since PetscOptionsEnum() sets enum values based on their position in MatCUSPARSESpMVAlgorithms[] */ #if CUSPARSE_VERSION > 11301 PetscCheck(!flg || CUSPARSE_SPMV_CSR_ALG1 == 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum hipsparseSpMVAlg_t has been changed but PETSc has not been updated accordingly"); #else PetscCheck(!flg || HIPSPARSE_CSRMV_ALG1 == 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum hipsparseSpMVAlg_t has been changed but PETSc has not been updated accordingly"); #endif PetscCall(PetscOptionsEnum("-mat_cusparse_spmm_alg", "sets cuSPARSE algorithm used in sparse-mat dense-mat multiplication (SpMM)", "hipsparseSpMMAlg_t", MatCUSPARSESpMMAlgorithms, (PetscEnum)cusparsestruct->spmmAlg, (PetscEnum *)&cusparsestruct->spmmAlg, &flg)); PetscCheck(!flg || HIPSPARSE_CSRMM_ALG1 == 4, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum hipsparseSpMMAlg_t has been changed but PETSc has not been updated accordingly"); PetscCall( PetscOptionsEnum("-mat_cusparse_csr2csc_alg", "sets cuSPARSE algorithm used in converting CSR matrices to CSC matrices", "hipsparseCsr2CscAlg_t", MatCUSPARSECsr2CscAlgorithms, (PetscEnum)cusparsestruct->csr2cscAlg, (PetscEnum *)&cusparsestruct->csr2cscAlg, &flg)); PetscCheck(!flg || HIPSPARSE_CSR2CSC_ALG1 == 1, PETSC_COMM_SELF, PETSC_ERR_SUP, 
"cuSPARSE enum hipsparseCsr2CscAlg_t has been changed but PETSc has not been updated accordingly"); #endif } PetscOptionsHeadEnd(); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscInt n = A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr; const PetscInt *ai = a->i, *aj = a->j, *vi; const MatScalar *aa = a->a, *v; PetscInt *AiLo, *AjLo; PetscInt i, nz, nzLower, offset, rowOffset; PetscFunctionBegin; if (!n) PetscFunctionReturn(PETSC_SUCCESS); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { try { /* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */ nzLower = n + ai[n] - ai[1]; if (!loTriFactor) { PetscScalar *AALo; PetscCallCUDA(hipHostMalloc((void **)&AALo, nzLower * sizeof(PetscScalar))); /* Allocate Space for the lower triangular matrix */ PetscCallCUDA(hipHostMalloc((void **)&AiLo, (n + 1) * sizeof(PetscInt))); PetscCallCUDA(hipHostMalloc((void **)&AjLo, nzLower * sizeof(PetscInt))); /* Fill the lower triangular matrix */ AiLo[0] = (PetscInt)0; AiLo[n] = nzLower; AjLo[0] = (PetscInt)0; AALo[0] = (MatScalar)1.0; v = aa; vi = aj; offset = 1; rowOffset = 1; for (i = 1; i < n; i++) { nz = ai[i + 1] - ai[i]; /* additional 1 for the term on the diagonal */ AiLo[i] = rowOffset; rowOffset += nz + 1; PetscCall(PetscArraycpy(&(AjLo[offset]), vi, nz)); PetscCall(PetscArraycpy(&(AALo[offset]), v, nz)); offset += nz; AjLo[offset] = (PetscInt)i; AALo[offset] = (MatScalar)1.0; offset += 1; v += nz; vi += nz; } /* allocate space for the triangular factor information */ PetscCall(PetscNew(&loTriFactor)); loTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ PetscCallCUSPARSE(hipsparseCreateMatDescr(&loTriFactor->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); #else PetscCallCUSPARSE(hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR)); #endif PetscCallCUSPARSE(hipsparseSetMatFillMode(loTriFactor->descr, HIPSPARSE_FILL_MODE_LOWER)); PetscCallCUSPARSE(hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT)); /* set the operation */ loTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ loTriFactor->csrMat = new CsrMatrix; loTriFactor->csrMat->num_rows = n; loTriFactor->csrMat->num_cols = n; loTriFactor->csrMat->num_entries = nzLower; loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n + 1); loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo + n + 1); loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower); loTriFactor->csrMat->column_indices->assign(AjLo, AjLo + nzLower); loTriFactor->csrMat->values = new THRUSTARRAY(nzLower); loTriFactor->csrMat->values->assign(AALo, AALo + nzLower); /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactor->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, 
loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize)); PetscCallCUDA(hipMalloc(&loTriFactor->solveBuffer, loTriFactor->solveBufferSize)); #endif /* perform the solve analysis */ PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, loTriFactor->solvePolicy, loTriFactor->solveBuffer)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtr = loTriFactor; loTriFactor->AA_h = AALo; PetscCallCUDA(hipHostFree(AiLo)); PetscCallCUDA(hipHostFree(AjLo)); PetscCall(PetscLogCpuToGpu((n + 1 + nzLower) * sizeof(int) + nzLower * sizeof(PetscScalar))); } else { /* update values only */ if (!loTriFactor->AA_h) PetscCallCUDA(hipHostMalloc((void **)&loTriFactor->AA_h, nzLower * sizeof(PetscScalar))); /* Fill the lower triangular matrix */ loTriFactor->AA_h[0] = 1.0; v = aa; vi = aj; offset = 1; for (i = 1; i < n; i++) { nz = ai[i + 1] - ai[i]; PetscCall(PetscArraycpy(&(loTriFactor->AA_h[offset]), v, nz)); offset += nz; loTriFactor->AA_h[offset] = 1.0; offset += 1; v += nz; } loTriFactor->csrMat->values->assign(loTriFactor->AA_h, loTriFactor->AA_h + nzLower); PetscCall(PetscLogCpuToGpu(nzLower * sizeof(PetscScalar))); } } catch (char *ex) { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex); } } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscInt n = A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr; const PetscInt *aj = a->j, *adiag = a->diag, *vi; const MatScalar *aa = a->a, *v; PetscInt *AiUp, *AjUp; PetscInt i, nz, nzUpper, offset; PetscFunctionBegin; if (!n) PetscFunctionReturn(PETSC_SUCCESS); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { try { /* next, figure out the number of nonzeros in the upper triangular matrix. */ nzUpper = adiag[0] - adiag[n]; if (!upTriFactor) { PetscScalar *AAUp; PetscCallCUDA(hipHostMalloc((void **)&AAUp, nzUpper * sizeof(PetscScalar))); /* Allocate Space for the upper triangular matrix */ PetscCallCUDA(hipHostMalloc((void **)&AiUp, (n + 1) * sizeof(PetscInt))); PetscCallCUDA(hipHostMalloc((void **)&AjUp, nzUpper * sizeof(PetscInt))); /* Fill the upper triangular matrix */ AiUp[0] = (PetscInt)0; AiUp[n] = nzUpper; offset = nzUpper; for (i = n - 1; i >= 0; i--) { v = aa + adiag[i + 1] + 1; vi = aj + adiag[i + 1] + 1; /* number of elements NOT on the diagonal */ nz = adiag[i] - adiag[i + 1] - 1; /* decrement the offset */ offset -= (nz + 1); /* first, set the diagonal elements */ AjUp[offset] = (PetscInt)i; AAUp[offset] = (MatScalar)1. 
/ v[nz]; AiUp[i] = AiUp[i + 1] - (nz + 1); PetscCall(PetscArraycpy(&(AjUp[offset + 1]), vi, nz)); PetscCall(PetscArraycpy(&(AAUp[offset + 1]), v, nz)); } /* allocate space for the triangular factor information */ PetscCall(PetscNew(&upTriFactor)); upTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ PetscCallCUSPARSE(hipsparseCreateMatDescr(&upTriFactor->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); #else PetscCallCUSPARSE(hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR)); #endif PetscCallCUSPARSE(hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER)); PetscCallCUSPARSE(hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT)); /* set the operation */ upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ upTriFactor->csrMat = new CsrMatrix; upTriFactor->csrMat->num_rows = n; upTriFactor->csrMat->num_cols = n; upTriFactor->csrMat->num_entries = nzUpper; upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n + 1); upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + n + 1); upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper); upTriFactor->csrMat->column_indices->assign(AjUp, AjUp + nzUpper); upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper); upTriFactor->csrMat->values->assign(AAUp, AAUp + nzUpper); /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactor->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize)); PetscCallCUDA(hipMalloc(&upTriFactor->solveBuffer, upTriFactor->solveBufferSize)); #endif /* perform the solve analysis */ PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, upTriFactor->solvePolicy, upTriFactor->solveBuffer)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtr = upTriFactor; upTriFactor->AA_h = AAUp; PetscCallCUDA(hipHostFree(AiUp)); PetscCallCUDA(hipHostFree(AjUp)); PetscCall(PetscLogCpuToGpu((n + 1 + nzUpper) * sizeof(int) + nzUpper * sizeof(PetscScalar))); } else { if (!upTriFactor->AA_h) PetscCallCUDA(hipHostMalloc((void **)&upTriFactor->AA_h, nzUpper * sizeof(PetscScalar))); /* Fill the upper triangular matrix */ offset = nzUpper; for (i = n - 1; i >= 0; i--) { v = aa + adiag[i + 1] + 1; /* number of elements NOT on the diagonal */ nz = adiag[i] - adiag[i + 1] - 1; /* decrement the offset */ offset -= (nz + 1); /* first, set the diagonal elements */ upTriFactor->AA_h[offset] = 1. 
/ v[nz]; PetscCall(PetscArraycpy(&(upTriFactor->AA_h[offset + 1]), v, nz)); } upTriFactor->csrMat->values->assign(upTriFactor->AA_h, upTriFactor->AA_h + nzUpper); PetscCall(PetscLogCpuToGpu(nzUpper * sizeof(PetscScalar))); } } catch (char *ex) { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex); } } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; IS isrow = a->row, iscol = a->icol; PetscBool row_identity, col_identity; PetscInt n = A->rmap->n; PetscFunctionBegin; PetscCheck(cusparseTriFactors, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors"); PetscCall(MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A)); PetscCall(MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A)); if (!cusparseTriFactors->workVector) cusparseTriFactors->workVector = new THRUSTARRAY(n); cusparseTriFactors->nnz = a->nz; A->offloadmask = PETSC_OFFLOAD_BOTH; /* lower triangular indices */ PetscCall(ISIdentity(isrow, &row_identity)); if (!row_identity && !cusparseTriFactors->rpermIndices) { const PetscInt *r; PetscCall(ISGetIndices(isrow, &r)); cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->rpermIndices->assign(r, r + n); PetscCall(ISRestoreIndices(isrow, &r)); PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt))); } /* upper triangular indices */ PetscCall(ISIdentity(iscol, &col_identity)); if (!col_identity && !cusparseTriFactors->cpermIndices) { const PetscInt *c; PetscCall(ISGetIndices(iscol, &c)); cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->cpermIndices->assign(c, c + n); PetscCall(ISRestoreIndices(iscol, &c)); PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt))); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr; PetscInt *AiUp, *AjUp; PetscScalar *AAUp; PetscScalar *AALo; PetscInt nzUpper = a->nz, n = A->rmap->n, i, offset, nz, j; Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ *)A->data; const PetscInt *ai = b->i, *aj = b->j, *vj; const MatScalar *aa = b->a, *v; PetscFunctionBegin; if (!n) PetscFunctionReturn(PETSC_SUCCESS); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { try { PetscCallCUDA(hipHostMalloc((void **)&AAUp, nzUpper * sizeof(PetscScalar))); PetscCallCUDA(hipHostMalloc((void **)&AALo, nzUpper * sizeof(PetscScalar))); if (!upTriFactor && !loTriFactor) { /* Allocate Space for the upper triangular matrix */ PetscCallCUDA(hipHostMalloc((void **)&AiUp, (n + 1) * sizeof(PetscInt))); PetscCallCUDA(hipHostMalloc((void **)&AjUp, nzUpper * sizeof(PetscInt))); /* Fill the upper triangular matrix */ AiUp[0] = (PetscInt)0; AiUp[n] = nzUpper; offset = 0; for (i = 0; i < n; i++) { /* set the pointers */ v = aa + ai[i]; vj = aj + ai[i]; nz = ai[i + 1] - ai[i] - 1; /* exclude diag[i] */ /* first, set the diagonal elements */ AjUp[offset] = (PetscInt)i; AAUp[offset] = (MatScalar)1.0 / v[nz]; AiUp[i] = offset; AALo[offset] = (MatScalar)1.0 / v[nz]; offset += 1; if (nz > 0) { PetscCall(PetscArraycpy(&(AjUp[offset]), 
vj, nz)); PetscCall(PetscArraycpy(&(AAUp[offset]), v, nz)); for (j = offset; j < offset + nz; j++) { AAUp[j] = -AAUp[j]; AALo[j] = AAUp[j] / v[nz]; } offset += nz; } } /* allocate space for the triangular factor information */ PetscCall(PetscNew(&upTriFactor)); upTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ PetscCallCUSPARSE(hipsparseCreateMatDescr(&upTriFactor->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(upTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); #else PetscCallCUSPARSE(hipsparseSetMatType(upTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR)); #endif PetscCallCUSPARSE(hipsparseSetMatFillMode(upTriFactor->descr, HIPSPARSE_FILL_MODE_UPPER)); PetscCallCUSPARSE(hipsparseSetMatDiagType(upTriFactor->descr, HIPSPARSE_DIAG_TYPE_UNIT)); /* set the matrix */ upTriFactor->csrMat = new CsrMatrix; upTriFactor->csrMat->num_rows = A->rmap->n; upTriFactor->csrMat->num_cols = A->cmap->n; upTriFactor->csrMat->num_entries = a->nz; upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1); upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + A->rmap->n + 1); upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); upTriFactor->csrMat->column_indices->assign(AjUp, AjUp + a->nz); upTriFactor->csrMat->values = new THRUSTARRAY(a->nz); upTriFactor->csrMat->values->assign(AAUp, AAUp + a->nz); /* set the operation */ upTriFactor->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactor->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize)); PetscCallCUDA(hipMalloc(&upTriFactor->solveBuffer, upTriFactor->solveBufferSize)); #endif /* perform the solve analysis */ PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, upTriFactor->solvePolicy, upTriFactor->solveBuffer)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtr = upTriFactor; /* allocate space for the triangular factor information */ PetscCall(PetscNew(&loTriFactor)); loTriFactor->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ PetscCallCUSPARSE(hipsparseCreateMatDescr(&loTriFactor->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(loTriFactor->descr, HIPSPARSE_INDEX_BASE_ZERO)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); #else PetscCallCUSPARSE(hipsparseSetMatType(loTriFactor->descr, HIPSPARSE_MATRIX_TYPE_TRIANGULAR)); #endif PetscCallCUSPARSE(hipsparseSetMatFillMode(loTriFactor->descr, 
HIPSPARSE_FILL_MODE_UPPER)); PetscCallCUSPARSE(hipsparseSetMatDiagType(loTriFactor->descr, HIPSPARSE_DIAG_TYPE_NON_UNIT)); /* set the operation */ loTriFactor->solveOp = HIPSPARSE_OPERATION_TRANSPOSE; /* set the matrix */ loTriFactor->csrMat = new CsrMatrix; loTriFactor->csrMat->num_rows = A->rmap->n; loTriFactor->csrMat->num_cols = A->cmap->n; loTriFactor->csrMat->num_entries = a->nz; loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1); loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + A->rmap->n + 1); loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); loTriFactor->csrMat->column_indices->assign(AjUp, AjUp + a->nz); loTriFactor->csrMat->values = new THRUSTARRAY(a->nz); loTriFactor->csrMat->values->assign(AALo, AALo + a->nz); /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactor->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize)); PetscCallCUDA(hipMalloc(&loTriFactor->solveBuffer, loTriFactor->solveBufferSize)); #endif /* perform the solve analysis */ PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, loTriFactor->solvePolicy, loTriFactor->solveBuffer)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtr = loTriFactor; PetscCall(PetscLogCpuToGpu(2 * (((A->rmap->n + 1) + (a->nz)) * sizeof(int) + (a->nz) * sizeof(PetscScalar)))); PetscCallCUDA(hipHostFree(AiUp)); PetscCallCUDA(hipHostFree(AjUp)); } else { /* Fill the upper triangular matrix */ offset = 0; for (i = 0; i < n; i++) { /* set the pointers */ v = aa + ai[i]; nz = ai[i + 1] - ai[i] - 1; /* exclude diag[i] */ /* first, set the diagonal elements */ AAUp[offset] = 1.0 / v[nz]; AALo[offset] = 1.0 / v[nz]; offset += 1; if (nz > 0) { PetscCall(PetscArraycpy(&(AAUp[offset]), v, nz)); for (j = offset; j < offset + nz; j++) { AAUp[j] = -AAUp[j]; AALo[j] = AAUp[j] / v[nz]; } offset += nz; } } PetscCheck(upTriFactor, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors"); PetscCheck(loTriFactor, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors"); upTriFactor->csrMat->values->assign(AAUp, AAUp + a->nz); loTriFactor->csrMat->values->assign(AALo, AALo + a->nz); PetscCall(PetscLogCpuToGpu(2 * (a->nz) * sizeof(PetscScalar))); } PetscCallCUDA(hipHostFree(AAUp)); PetscCallCUDA(hipHostFree(AALo)); } catch (char *ex) { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex); } } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; IS ip = a->row; PetscBool perm_identity; PetscInt n = A->rmap->n; 
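  /* Build the ICC triangular factors on the GPU, allocate the work vector used by the triangular solves, and, when the factorization ordering is not the identity, upload the row/column permutation indices applied by the permuted MatSolve variants. */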
PetscFunctionBegin; PetscCheck(cusparseTriFactors, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors"); PetscCall(MatSeqAIJCUSPARSEBuildICCTriMatrices(A)); if (!cusparseTriFactors->workVector) cusparseTriFactors->workVector = new THRUSTARRAY(n); cusparseTriFactors->nnz = (a->nz - n) * 2 + n; A->offloadmask = PETSC_OFFLOAD_BOTH; /* lower triangular indices */ PetscCall(ISIdentity(ip, &perm_identity)); if (!perm_identity) { IS iip; const PetscInt *irip, *rip; PetscCall(ISInvertPermutation(ip, PETSC_DECIDE, &iip)); PetscCall(ISGetIndices(iip, &irip)); PetscCall(ISGetIndices(ip, &rip)); cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->rpermIndices->assign(rip, rip + n); cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->cpermIndices->assign(irip, irip + n); PetscCall(ISRestoreIndices(iip, &irip)); PetscCall(ISDestroy(&iip)); PetscCall(ISRestoreIndices(ip, &rip)); PetscCall(PetscLogCpuToGpu(2. * n * sizeof(PetscInt))); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B, Mat A, const MatFactorInfo *info) { Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data; IS ip = b->row; PetscBool perm_identity; PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A)); PetscCall(MatCholeskyFactorNumeric_SeqAIJ(B, A, info)); B->offloadmask = PETSC_OFFLOAD_CPU; /* determine which version of MatSolve needs to be used. */ PetscCall(ISIdentity(ip, &perm_identity)); if (perm_identity) { B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; } else { B->ops->solve = MatSolve_SeqAIJCUSPARSE; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; } /* get the triangular factors */ PetscCall(MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT; hipsparseIndexBase_t indexBase; hipsparseMatrixType_t matrixType; hipsparseFillMode_t fillMode; hipsparseDiagType_t diagType; PetscFunctionBegin; /* allocate space for the transpose of the lower triangular factor */ PetscCall(PetscNew(&loTriFactorT)); loTriFactorT->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; /* set the matrix descriptors of the lower triangular factor */ matrixType = cusparseGetMatType(loTriFactor->descr); indexBase = cusparseGetMatIndexBase(loTriFactor->descr); fillMode = cusparseGetMatFillMode(loTriFactor->descr) == HIPSPARSE_FILL_MODE_UPPER ? 
HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER; diagType = cusparseGetMatDiagType(loTriFactor->descr); /* Create the matrix description */ PetscCallCUSPARSE(hipsparseCreateMatDescr(&loTriFactorT->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(loTriFactorT->descr, indexBase)); PetscCallCUSPARSE(hipsparseSetMatType(loTriFactorT->descr, matrixType)); PetscCallCUSPARSE(hipsparseSetMatFillMode(loTriFactorT->descr, fillMode)); PetscCallCUSPARSE(hipsparseSetMatDiagType(loTriFactorT->descr, diagType)); /* set the operation */ loTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* allocate GPU space for the CSC of the lower triangular factor*/ loTriFactorT->csrMat = new CsrMatrix; loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_cols; loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_rows; loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries; loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_rows + 1); loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_entries); loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactorT->csrMat->num_entries); /* compute the transpose of the lower triangular factor, i.e. the CSC */ #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCallCUSPARSE(hipsparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, HIPSPARSE_CSR2CSC_ALG1, &loTriFactor->csr2cscBufferSize)); PetscCallCUDA(hipMalloc(&loTriFactor->csr2cscBuffer, loTriFactor->csr2cscBufferSize)); #endif PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); { // there is no clean way to have PetscCallCUSPARSE wrapping this function... 
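    /* Convert the lower triangular factor from CSR to CSC, which yields its transpose in CSR form. The newer Csr2cscEx2 path (PETSC_PKG_CUDA_VERSION_GE(11,0,0)) takes an explicit value type, algorithm choice, and external buffer, while the older path does not, hence the preprocessor guard inside the call below. */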
auto stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, HIPSPARSE_CSR2CSC_ALG1, loTriFactor->csr2cscBuffer); #else loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase); #endif PetscCallCUSPARSE(stat); } PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactorT->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, &loTriFactorT->solveBufferSize)); PetscCallCUDA(hipMalloc(&loTriFactorT->solveBuffer, loTriFactorT->solveBufferSize)); #endif /* perform the solve analysis */ PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtrTranspose = loTriFactorT; /*********************************************/ /* Now the Transpose of the Upper Tri Factor */ /*********************************************/ /* allocate space for the transpose of the upper triangular factor */ PetscCall(PetscNew(&upTriFactorT)); upTriFactorT->solvePolicy = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; /* set the matrix descriptors of the upper triangular factor */ matrixType = cusparseGetMatType(upTriFactor->descr); indexBase = cusparseGetMatIndexBase(upTriFactor->descr); fillMode = cusparseGetMatFillMode(upTriFactor->descr) == HIPSPARSE_FILL_MODE_UPPER ? 
HIPSPARSE_FILL_MODE_LOWER : HIPSPARSE_FILL_MODE_UPPER; diagType = cusparseGetMatDiagType(upTriFactor->descr); /* Create the matrix description */ PetscCallCUSPARSE(hipsparseCreateMatDescr(&upTriFactorT->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(upTriFactorT->descr, indexBase)); PetscCallCUSPARSE(hipsparseSetMatType(upTriFactorT->descr, matrixType)); PetscCallCUSPARSE(hipsparseSetMatFillMode(upTriFactorT->descr, fillMode)); PetscCallCUSPARSE(hipsparseSetMatDiagType(upTriFactorT->descr, diagType)); /* set the operation */ upTriFactorT->solveOp = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* allocate GPU space for the CSC of the upper triangular factor*/ upTriFactorT->csrMat = new CsrMatrix; upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_cols; upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_rows; upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries; upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_rows + 1); upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_entries); upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactorT->csrMat->num_entries); /* compute the transpose of the upper triangular factor, i.e. the CSC */ #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCallCUSPARSE(hipsparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, HIPSPARSE_CSR2CSC_ALG1, &upTriFactor->csr2cscBufferSize)); PetscCallCUDA(hipMalloc(&upTriFactor->csr2cscBuffer, upTriFactor->csr2cscBufferSize)); #endif PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); { // there is no clean way to have PetscCallCUSPARSE wrapping this function... 
auto stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, HIPSPARSE_CSR2CSC_ALG1, upTriFactor->csr2cscBuffer); #else upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase); #endif PetscCallCUSPARSE(stat); } PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactorT->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, &upTriFactorT->solveBufferSize)); PetscCallCUDA(hipMalloc(&upTriFactorT->solveBuffer, upTriFactorT->solveBufferSize)); #endif /* perform the solve analysis */ /* christ, would it have killed you to put this stuff in a function????????? */ PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, upTriFactorT->solvePolicy, upTriFactorT->solveBuffer)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtrTranspose = upTriFactorT; PetscFunctionReturn(PETSC_SUCCESS); } struct PetscScalarToPetscInt { __host__ __device__ PetscInt operator()(PetscScalar s) { return (PetscInt)PetscRealPart(s); } }; static PetscErrorCode MatSeqAIJCUSPARSEFormExplicitTranspose(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct, *matstructT; Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; hipsparseStatus_t stat; hipsparseIndexBase_t indexBase; PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat; PetscCheck(matstruct, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing mat struct"); matstructT = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->matTranspose; PetscCheck(!A->transupdated || matstructT, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing matTranspose struct"); if (A->transupdated) PetscFunctionReturn(PETSC_SUCCESS); PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); PetscCall(PetscLogGpuTimeBegin()); if (cusparsestruct->format != MAT_CUSPARSE_CSR) PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE)); if (!cusparsestruct->matTranspose) { /* create cusparse matrix */ matstructT = new Mat_SeqAIJCUSPARSEMultStruct; 
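    /* Set up the transpose multiply struct: create a matrix descriptor with the same index base as A and stage the alpha/beta scalar constants in device memory so they can be passed to the SpMV routines by pointer. */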
PetscCallCUSPARSE(hipsparseCreateMatDescr(&matstructT->descr)); indexBase = cusparseGetMatIndexBase(matstruct->descr); PetscCallCUSPARSE(hipsparseSetMatIndexBase(matstructT->descr, indexBase)); PetscCallCUSPARSE(hipsparseSetMatType(matstructT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); /* set alpha and beta */ PetscCallCUDA(hipMalloc((void **)&(matstructT->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(matstructT->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(matstructT->beta_one), sizeof(PetscScalar))); PetscCallCUDA(hipMemcpy(matstructT->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(matstructT->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(matstructT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); if (cusparsestruct->format == MAT_CUSPARSE_CSR) { CsrMatrix *matrixT = new CsrMatrix; matstructT->mat = matrixT; matrixT->num_rows = A->cmap->n; matrixT->num_cols = A->rmap->n; matrixT->num_entries = a->nz; matrixT->row_offsets = new THRUSTINTARRAY32(matrixT->num_rows + 1); matrixT->column_indices = new THRUSTINTARRAY32(a->nz); matrixT->values = new THRUSTARRAY(a->nz); if (!cusparsestruct->rowoffsets_gpu) cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); cusparsestruct->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) #if PETSC_PKG_CUDA_VERSION_GE(11, 2, 1) stat = hipsparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, /* row offset, col idx type due to THRUSTINTARRAY32 */ indexBase, cusparse_scalartype); PetscCallCUSPARSE(stat); #else /* cusparse-11.x returns errors with zero-sized matrices until 11.2.1, see https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cusparse-11.2.1 I don't know what a proper value should be for matstructT->matDescr with empty matrices, so I just set it to NULL to blow it up if one relies on it. Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[] should be filled with indexBase. So I also set it accordingly. 
*/ if (matrixT->num_entries) { stat = hipsparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, indexBase, cusparse_scalartype); PetscCallCUSPARSE(stat); } else { matstructT->matDescr = NULL; matrixT->row_offsets->assign(matrixT->row_offsets->size(), indexBase); } #endif #endif } else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else CsrMatrix *temp = new CsrMatrix; CsrMatrix *tempT = new CsrMatrix; /* First convert HYB to CSR */ temp->num_rows = A->rmap->n; temp->num_cols = A->cmap->n; temp->num_entries = a->nz; temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1); temp->column_indices = new THRUSTINTARRAY32(a->nz); temp->values = new THRUSTARRAY(a->nz); stat = cusparse_hyb2csr(cusparsestruct->handle, matstruct->descr, (cusparseHybMat_t)matstruct->mat, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get()); PetscCallCUSPARSE(stat); /* Next, convert CSR to CSC (i.e. the matrix transpose) */ tempT->num_rows = A->rmap->n; tempT->num_cols = A->cmap->n; tempT->num_entries = a->nz; tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1); tempT->column_indices = new THRUSTINTARRAY32(a->nz); tempT->values = new THRUSTARRAY(a->nz); stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows, temp->num_cols, temp->num_entries, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get(), tempT->values->data().get(), tempT->column_indices->data().get(), tempT->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase); PetscCallCUSPARSE(stat); /* Last, convert CSC to HYB */ cusparseHybMat_t hybMat; PetscCallCUSPARSE(cusparseCreateHybMat(&hybMat)); cusparseHybPartition_t partition = cusparsestruct->format == MAT_CUSPARSE_ELL ? 
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n, matstructT->descr, tempT->values->data().get(), tempT->row_offsets->data().get(), tempT->column_indices->data().get(), hybMat, 0, partition); PetscCallCUSPARSE(stat); /* assign the pointer */ matstructT->mat = hybMat; A->transupdated = PETSC_TRUE; /* delete temporaries */ if (tempT) { if (tempT->values) delete (THRUSTARRAY *)tempT->values; if (tempT->column_indices) delete (THRUSTINTARRAY32 *)tempT->column_indices; if (tempT->row_offsets) delete (THRUSTINTARRAY32 *)tempT->row_offsets; delete (CsrMatrix *)tempT; } if (temp) { if (temp->values) delete (THRUSTARRAY *)temp->values; if (temp->column_indices) delete (THRUSTINTARRAY32 *)temp->column_indices; if (temp->row_offsets) delete (THRUSTINTARRAY32 *)temp->row_offsets; delete (CsrMatrix *)temp; } #endif } } if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* transpose mat struct may be already present, update data */ CsrMatrix *matrix = (CsrMatrix *)matstruct->mat; CsrMatrix *matrixT = (CsrMatrix *)matstructT->mat; PetscCheck(matrix, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix"); PetscCheck(matrix->row_offsets, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix rows"); PetscCheck(matrix->column_indices, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix cols"); PetscCheck(matrix->values, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix values"); PetscCheck(matrixT, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT"); PetscCheck(matrixT->row_offsets, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT rows"); PetscCheck(matrixT->column_indices, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT cols"); PetscCheck(matrixT->values, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT values"); if (!cusparsestruct->rowoffsets_gpu) { /* this may be absent when we did not construct the transpose with csr2csc */ cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); cusparsestruct->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1); PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt))); } if (!cusparsestruct->csr2csc_i) { THRUSTARRAY csr2csc_a(matrix->num_entries); PetscCallThrust(thrust::sequence(thrust::device, csr2csc_a.begin(), csr2csc_a.end(), 0.0)); indexBase = cusparseGetMatIndexBase(matstruct->descr); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) void *csr2cscBuffer; size_t csr2cscBufferSize; stat = hipsparseCsr2cscEx2_bufferSize(cusparsestruct->handle, A->rmap->n, A->cmap->n, matrix->num_entries, matrix->values->data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, cusparsestruct->csr2cscAlg, &csr2cscBufferSize); PetscCallCUSPARSE(stat); PetscCallCUDA(hipMalloc(&csr2cscBuffer, csr2cscBufferSize)); #endif if (matrix->num_entries) { /* When there are no nonzeros, this routine mistakenly returns HIPSPARSE_STATUS_INVALID_VALUE in mat_tests-ex62_15_mpiaijcusparse on ranks 0 and 2 with CUDA-11. But CUDA-10 is OK. I checked every parameters and they were just fine. I have no clue why cusparse complains. Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[] should be filled with indexBase. So I just take a shortcut here. 
*/ stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n, A->cmap->n, matrix->num_entries, csr2csc_a.data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, HIPSPARSE_ACTION_NUMERIC, indexBase, cusparsestruct->csr2cscAlg, csr2cscBuffer); PetscCallCUSPARSE(stat); #else matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(), HIPSPARSE_ACTION_NUMERIC, indexBase); PetscCallCUSPARSE(stat); #endif } else { matrixT->row_offsets->assign(matrixT->row_offsets->size(), indexBase); } cusparsestruct->csr2csc_i = new THRUSTINTARRAY(matrix->num_entries); PetscCallThrust(thrust::transform(thrust::device, matrixT->values->begin(), matrixT->values->end(), cusparsestruct->csr2csc_i->begin(), PetscScalarToPetscInt())); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCallCUDA(hipFree(csr2cscBuffer)); #endif } PetscCallThrust( thrust::copy(thrust::device, thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->begin()), thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->end()), matrixT->values->begin())); } PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); /* the compressed row indices is not used for matTranspose */ matstructT->cprowIndices = NULL; /* assign the pointer */ ((Mat_SeqAIJCUSPARSE *)A->spptr)->matTranspose = matstructT; A->transupdated = PETSC_TRUE; PetscFunctionReturn(PETSC_SUCCESS); } /* Why do we need to analyze the transposed matrix again? Can't we just use op(A) = HIPSPARSE_OPERATION_TRANSPOSE in MatSolve_SeqAIJCUSPARSE? */ static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A, Vec bb, Vec xx) { PetscInt n = xx->map->n; const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector; PetscFunctionBegin; /* Analyze the matrix and create the transpose ... 
on the fly */ if (!loTriFactorT && !upTriFactorT) { PetscCall(MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A)); loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose; upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose; } /* Get the GPU pointers */ PetscCall(VecCUDAGetArrayWrite(xx, &xarray)); PetscCall(VecCUDAGetArrayRead(bb, &barray)); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); PetscCall(PetscLogGpuTimeBegin()); /* First, reorder with the row permutation */ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU + n, cusparseTriFactors->rpermIndices->end()), xGPU); /* First, solve U */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, xarray, tempGPU->data().get(), upTriFactorT->solvePolicy, upTriFactorT->solveBuffer)); /* Then, solve L */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer)); /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(xGPU + n, cusparseTriFactors->cpermIndices->end()), tempGPU->begin()); /* Copy the temporary to the full solution. */ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), tempGPU->begin(), tempGPU->end(), xGPU); /* restore */ PetscCall(VecCUDARestoreArrayRead(bb, &barray)); PetscCall(VecCUDARestoreArrayWrite(xx, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A, Vec bb, Vec xx) { const PetscScalar *barray; PetscScalar *xarray; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector; PetscFunctionBegin; /* Analyze the matrix and create the transpose ... 
on the fly */ if (!loTriFactorT && !upTriFactorT) { PetscCall(MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A)); loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose; upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose; } /* Get the GPU pointers */ PetscCall(VecCUDAGetArrayWrite(xx, &xarray)); PetscCall(VecCUDAGetArrayRead(bb, &barray)); PetscCall(PetscLogGpuTimeBegin()); /* First, solve U */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, barray, tempGPU->data().get(), upTriFactorT->solvePolicy, upTriFactorT->solveBuffer)); /* Then, solve L */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer)); /* restore */ PetscCall(VecCUDARestoreArrayRead(bb, &barray)); PetscCall(VecCUDARestoreArrayWrite(xx, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A, Vec bb, Vec xx) { const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr; THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector; PetscFunctionBegin; /* Get the GPU pointers */ PetscCall(VecCUDAGetArrayWrite(xx, &xarray)); PetscCall(VecCUDAGetArrayRead(bb, &barray)); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); PetscCall(PetscLogGpuTimeBegin()); /* First, reorder with the row permutation */ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), tempGPU->begin()); /* Next, solve L */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, tempGPU->data().get(), xarray, loTriFactor->solvePolicy, loTriFactor->solveBuffer)); /* Then, solve U */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), 
upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, xarray, tempGPU->data().get(), upTriFactor->solvePolicy, upTriFactor->solveBuffer)); /* Last, reorder with the column permutation */ thrust::copy(thrust::hip::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()), xGPU); PetscCall(VecCUDARestoreArrayRead(bb, &barray)); PetscCall(VecCUDARestoreArrayWrite(xx, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A, Vec bb, Vec xx) { const PetscScalar *barray; PetscScalar *xarray; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr; THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector; PetscFunctionBegin; /* Get the GPU pointers */ PetscCall(VecCUDAGetArrayWrite(xx, &xarray)); PetscCall(VecCUDAGetArrayRead(bb, &barray)); PetscCall(PetscLogGpuTimeBegin()); /* First, solve L */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, barray, tempGPU->data().get(), loTriFactor->solvePolicy, loTriFactor->solveBuffer)); /* Next, solve U */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, tempGPU->data().get(), xarray, upTriFactor->solvePolicy, upTriFactor->solveBuffer)); PetscCall(VecCUDARestoreArrayRead(bb, &barray)); PetscCall(VecCUDARestoreArrayWrite(xx, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n)); PetscFunctionReturn(PETSC_SUCCESS); } #if CUSPARSE_VERSION >= 11500 /* cusparseSpSV_solve() and friends first appeared in cusparse-11.3 */ static PetscErrorCode MatSolve_SeqAIJCUSPARSE_ILU0(Mat fact, Vec b, Vec x) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; const PetscScalar *barray; PetscScalar *xarray; PetscFunctionBegin; PetscCall(VecCUDAGetArrayWrite(x, &xarray)); PetscCall(VecCUDAGetArrayRead(b, &barray)); PetscCall(PetscLogGpuTimeBegin()); /* Solve L*y = b */ PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray)); PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y)); PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* L Y = X */ fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L)); // 
cusparseSpSV_solve() scretely uses the external buffer used in cusparseSpSV_analysis()! /* Solve U*x = y */ PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, xarray)); PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, /* U X = Y */ fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U)); PetscCall(VecCUDARestoreArrayRead(b, &barray)); PetscCall(VecCUDARestoreArrayWrite(x, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * aij->nz - fact->rmap->n)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_ILU0(Mat fact, Vec b, Vec x) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; const PetscScalar *barray; PetscScalar *xarray; PetscFunctionBegin; if (!fs->createdTransposeSpSVDescr) { /* Call MatSolveTranspose() for the first time */ PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Lt)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* The matrix is still L. We only do transpose solve with it */ fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt, &fs->spsvBufferSize_Lt)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Ut)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Ut, &fs->spsvBufferSize_Ut)); PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_Lt, fs->spsvBufferSize_Lt)); PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_Ut, fs->spsvBufferSize_Ut)); fs->createdTransposeSpSVDescr = PETSC_TRUE; } if (!fs->updatedTransposeSpSVAnalysis) { PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt, fs->spsvBuffer_Lt)); PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Ut, fs->spsvBuffer_Ut)); fs->updatedTransposeSpSVAnalysis = PETSC_TRUE; } PetscCall(VecCUDAGetArrayWrite(x, &xarray)); PetscCall(VecCUDAGetArrayRead(b, &barray)); PetscCall(PetscLogGpuTimeBegin()); /* Solve Ut*y = b */ PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray)); PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y)); PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, /* Ut Y = X */ fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Ut)); /* Solve Lt*x = y */ PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, xarray)); PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* Lt X = Y */ fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt)); PetscCall(VecCUDARestoreArrayRead(b, &barray)); PetscCall(VecCUDARestoreArrayWrite(x, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * aij->nz - fact->rmap->n)); 
PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatILUFactorNumeric_SeqAIJCUSPARSE_ILU0(Mat fact, Mat A, const MatFactorInfo *) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr; CsrMatrix *Acsr; PetscInt m, nz; PetscBool flg; PetscFunctionBegin; if (PetscDefined(USE_DEBUG)) { PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name); } /* Copy A's value to fact */ m = fact->rmap->n; nz = aij->nz; PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); Acsr = (CsrMatrix *)Acusp->mat->mat; PetscCallCUDA(hipMemcpyAsync(fs->csrVal, Acsr->values->data().get(), sizeof(PetscScalar) * nz, hipMemcpyDeviceToDevice, PetscDefaultCudaStream)); /* Factorize fact inplace */ if (m) PetscCallCUSPARSE(cusparseXcsrilu02(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */ fs->matDescr_M, fs->csrVal, fs->csrRowPtr, fs->csrColIdx, fs->ilu0Info_M, fs->policy_M, fs->factBuffer_M)); if (PetscDefined(USE_DEBUG)) { int numerical_zero; hipsparseStatus_t status; status = hipsparseXcsrilu02_zeroPivot(fs->handle, fs->ilu0Info_M, &numerical_zero); PetscAssert(HIPSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Numerical zero pivot detected in csrilu02: A(%d,%d) is zero", numerical_zero, numerical_zero); } /* cusparseSpSV_analysis() is numeric, i.e., it requires valid matrix values, therefore, we do it after cusparseXcsrilu02() See discussion at https://github.com/NVIDIA/CUDALibrarySamples/issues/78 */ PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L)); PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, fs->spsvBuffer_U)); /* L, U values have changed, reset the flag to indicate we need to redo cusparseSpSV_analysis() for transpose solve */ fs->updatedTransposeSpSVAnalysis = PETSC_FALSE; fact->offloadmask = PETSC_OFFLOAD_GPU; fact->ops->solve = MatSolve_SeqAIJCUSPARSE_ILU0; fact->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_ILU0; fact->ops->matsolve = NULL; fact->ops->matsolvetranspose = NULL; PetscCall(PetscLogGpuFlops(fs->numericFactFlops)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0(Mat fact, Mat A, IS, IS, const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; PetscInt m, nz; PetscFunctionBegin; if (PetscDefined(USE_DEBUG)) { PetscInt i; PetscBool flg, missing; PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name); PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Must be square matrix, rows %" PetscInt_FMT " columns %" PetscInt_FMT, A->rmap->n, A->cmap->n); PetscCall(MatMissingDiagonal(A, &missing, &i)); PetscCheck(!missing, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, 
"Matrix is missing diagonal entry %" PetscInt_FMT, i); } /* Free the old stale stuff */ PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&fs)); /* Copy over A's meta data to fact. Note that we also allocated fact's i,j,a on host, but they will not be used. Allocate them just for easy debugging. */ PetscCall(MatDuplicateNoCreate_SeqAIJ(fact, A, MAT_DO_NOT_COPY_VALUES, PETSC_TRUE /*malloc*/)); fact->offloadmask = PETSC_OFFLOAD_BOTH; fact->factortype = MAT_FACTOR_ILU; fact->info.factor_mallocs = 0; fact->info.fill_ratio_given = info->fill; fact->info.fill_ratio_needed = 1.0; aij->row = NULL; aij->col = NULL; /* ====================================================================== */ /* Copy A's i, j to fact and also allocate the value array of fact. */ /* We'll do in-place factorization on fact */ /* ====================================================================== */ const int *Ai, *Aj; m = fact->rmap->n; nz = aij->nz; PetscCallCUDA(hipMalloc((void **)&fs->csrRowPtr, sizeof(int) * (m + 1))); PetscCallCUDA(hipMalloc((void **)&fs->csrColIdx, sizeof(int) * nz)); PetscCallCUDA(hipMalloc((void **)&fs->csrVal, sizeof(PetscScalar) * nz)); PetscCall(MatSeqAIJCUSPARSEGetIJ(A, PETSC_FALSE, &Ai, &Aj)); /* Do not use compressed Ai */ PetscCallCUDA(hipMemcpyAsync(fs->csrRowPtr, Ai, sizeof(int) * (m + 1), hipMemcpyDeviceToDevice, PetscDefaultCudaStream)); PetscCallCUDA(hipMemcpyAsync(fs->csrColIdx, Aj, sizeof(int) * nz, hipMemcpyDeviceToDevice, PetscDefaultCudaStream)); /* ====================================================================== */ /* Create descriptors for M, L, U */ /* ====================================================================== */ hipsparseFillMode_t fillMode; hipsparseDiagType_t diagType; PetscCallCUSPARSE(hipsparseCreateMatDescr(&fs->matDescr_M)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(fs->matDescr_M, HIPSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(hipsparseSetMatType(fs->matDescr_M, HIPSPARSE_MATRIX_TYPE_GENERAL)); /* https://docs.nvidia.com/cuda/cusparse/index.html#hipsparseDiagType_t hipsparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always assumed to be present, but if HIPSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory. 
*/ fillMode = HIPSPARSE_FILL_MODE_LOWER; diagType = HIPSPARSE_DIAG_TYPE_UNIT; PetscCallCUSPARSE(hipsparseCreateCsr(&fs->spMatDescr_L, m, m, nz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype)); PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_L, HIPSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode))); PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_L, HIPSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType))); fillMode = HIPSPARSE_FILL_MODE_UPPER; diagType = HIPSPARSE_DIAG_TYPE_NON_UNIT; PetscCallCUSPARSE(hipsparseCreateCsr(&fs->spMatDescr_U, m, m, nz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype)); PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_U, HIPSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode))); PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_U, HIPSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType))); /* ========================================================================= */ /* Query buffer sizes for csrilu0, SpSV and allocate buffers */ /* ========================================================================= */ PetscCallCUSPARSE(hipsparseCreateCsrilu02Info(&fs->ilu0Info_M)); if (m) PetscCallCUSPARSE(cusparseXcsrilu02_bufferSize(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */ fs->matDescr_M, fs->csrVal, fs->csrRowPtr, fs->csrColIdx, fs->ilu0Info_M, &fs->factBufferSize_M)); PetscCallCUDA(hipMalloc((void **)&fs->X, sizeof(PetscScalar) * m)); PetscCallCUDA(hipMalloc((void **)&fs->Y, sizeof(PetscScalar) * m)); PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype)); PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_L)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_U)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, &fs->spsvBufferSize_U)); /* From my experiment with the example at https://github.com/NVIDIA/CUDALibrarySamples/tree/master/cuSPARSE/bicgstab, and discussion at https://github.com/NVIDIA/CUDALibrarySamples/issues/77, spsvBuffer_L/U can not be shared (i.e., the same) for our case, but factBuffer_M can share with either of spsvBuffer_L/U. To save memory, we make factBuffer_M share with the bigger of spsvBuffer_L/U. 
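   As a concrete (made-up) example: with spsvBufferSize_L = 6 MB, spsvBufferSize_U = 4 MB and factBufferSize_M = 5 MB,
   a single max(6, 5) = 6 MB allocation serves both csrilu02 and the L-solve, and only a separate 4 MB buffer is needed
   for the U-solve.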
*/ if (fs->spsvBufferSize_L > fs->spsvBufferSize_U) { PetscCallCUDA(hipMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_L, (size_t)fs->factBufferSize_M))); fs->spsvBuffer_L = fs->factBuffer_M; PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_U, fs->spsvBufferSize_U)); } else { PetscCallCUDA(hipMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_U, (size_t)fs->factBufferSize_M))); fs->spsvBuffer_U = fs->factBuffer_M; PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_L, fs->spsvBufferSize_L)); } /* ========================================================================== */ /* Perform analysis of ilu0 on M, SpSv on L and U */ /* The lower(upper) triangular part of M has the same sparsity pattern as L(U)*/ /* ========================================================================== */ int structural_zero; hipsparseStatus_t status; fs->policy_M = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; if (m) PetscCallCUSPARSE(cusparseXcsrilu02_analysis(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */ fs->matDescr_M, fs->csrVal, fs->csrRowPtr, fs->csrColIdx, fs->ilu0Info_M, fs->policy_M, fs->factBuffer_M)); if (PetscDefined(USE_DEBUG)) { /* Function hipsparseXcsrilu02_zeroPivot() is a blocking call. It calls hipDeviceSynchronize() to make sure all previous kernels are done. */ status = hipsparseXcsrilu02_zeroPivot(fs->handle, fs->ilu0Info_M, &structural_zero); PetscCheck(HIPSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Structural zero pivot detected in csrilu02: A(%d,%d) is missing", structural_zero, structural_zero); } /* Estimate FLOPs of the numeric factorization */ { Mat_SeqAIJ *Aseq = (Mat_SeqAIJ *)A->data; PetscInt *Ai, *Adiag, nzRow, nzLeft; PetscLogDouble flops = 0.0; PetscCall(MatMarkDiagonal_SeqAIJ(A)); Ai = Aseq->i; Adiag = Aseq->diag; for (PetscInt i = 0; i < m; i++) { if (Ai[i] < Adiag[i] && Adiag[i] < Ai[i + 1]) { /* There are nonzeros left to the diagonal of row i */ nzRow = Ai[i + 1] - Ai[i]; nzLeft = Adiag[i] - Ai[i]; /* We want to eliminate nonzeros left to the diagonal one by one. Assume each time, nonzeros right and include the eliminated one will be updated, which incurs a multiplication and an addition. 
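   Eliminating the k-th of the nzLeft entries then updates nzRow - k + 1 entries at 2 flops each; summing over
   k = 1..nzLeft gives nzLeft * (2*nzRow - nzLeft + 1), the expression accumulated below.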
*/ nzLeft = (nzRow - 1) / 2; flops += nzLeft * (2.0 * nzRow - nzLeft + 1); } } fs->numericFactFlops = flops; } fact->ops->lufactornumeric = MatILUFactorNumeric_SeqAIJCUSPARSE_ILU0; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSolve_SeqAIJCUSPARSE_ICC0(Mat fact, Vec b, Vec x) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; const PetscScalar *barray; PetscScalar *xarray; PetscFunctionBegin; PetscCall(VecCUDAGetArrayWrite(x, &xarray)); PetscCall(VecCUDAGetArrayRead(b, &barray)); PetscCall(PetscLogGpuTimeBegin()); /* Solve L*y = b */ PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray)); PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y)); PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* L Y = X */ fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L)); /* Solve Lt*x = y */ PetscCallCUSPARSE(hipsparseDnVecSetValues(fs->dnVecDescr_X, xarray)); PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* Lt X = Y */ fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt)); PetscCall(VecCUDARestoreArrayRead(b, &barray)); PetscCall(VecCUDARestoreArrayWrite(x, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * aij->nz - fact->rmap->n)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatICCFactorNumeric_SeqAIJCUSPARSE_ICC0(Mat fact, Mat A, const MatFactorInfo *) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr; CsrMatrix *Acsr; PetscInt m, nz; PetscBool flg; PetscFunctionBegin; if (PetscDefined(USE_DEBUG)) { PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name); } /* Copy A's value to fact */ m = fact->rmap->n; nz = aij->nz; PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); Acsr = (CsrMatrix *)Acusp->mat->mat; PetscCallCUDA(hipMemcpyAsync(fs->csrVal, Acsr->values->data().get(), sizeof(PetscScalar) * nz, hipMemcpyDeviceToDevice, PetscDefaultCudaStream)); /* Factorize fact inplace */ /* https://docs.nvidia.com/cuda/cusparse/index.html#csric02_solve Function csric02() only takes the lower triangular part of matrix A to perform factorization. The matrix type must be HIPSPARSE_MATRIX_TYPE_GENERAL, the fill mode and diagonal type are ignored, and the strictly upper triangular part is ignored and never touched. It does not matter if A is Hermitian or not. In other words, from the point of view of csric02() A is Hermitian and only the lower triangular part is provided. 
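   For us this means fs->csrVal can simply hold all of A's entries (copied above); csric02 reads only the lower
   triangle and overwrites it in place with the IC(0) factor.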
*/ if (m) PetscCallCUSPARSE(cusparseXcsric02(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr, fs->csrColIdx, fs->ic0Info_M, fs->policy_M, fs->factBuffer_M)); if (PetscDefined(USE_DEBUG)) { int numerical_zero; hipsparseStatus_t status; status = hipsparseXcsric02_zeroPivot(fs->handle, fs->ic0Info_M, &numerical_zero); PetscAssert(HIPSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Numerical zero pivot detected in csric02: A(%d,%d) is zero", numerical_zero, numerical_zero); } PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L)); /* Note that cusparse reports this error if we use double and HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE ** On entry to cusparseSpSV_analysis(): conjugate transpose (opA) is not supported for matA data type, current -> HIP_R_64F */ PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt, fs->spsvBuffer_Lt)); fact->offloadmask = PETSC_OFFLOAD_GPU; fact->ops->solve = MatSolve_SeqAIJCUSPARSE_ICC0; fact->ops->solvetranspose = MatSolve_SeqAIJCUSPARSE_ICC0; fact->ops->matsolve = NULL; fact->ops->matsolvetranspose = NULL; PetscCall(PetscLogGpuFlops(fs->numericFactFlops)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE_ICC0(Mat fact, Mat A, IS, const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; PetscInt m, nz; PetscFunctionBegin; if (PetscDefined(USE_DEBUG)) { PetscInt i; PetscBool flg, missing; PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name); PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Must be square matrix, rows %" PetscInt_FMT " columns %" PetscInt_FMT, A->rmap->n, A->cmap->n); PetscCall(MatMissingDiagonal(A, &missing, &i)); PetscCheck(!missing, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Matrix is missing diagonal entry %" PetscInt_FMT, i); } /* Free the old stale stuff */ PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&fs)); /* Copy over A's meta data to fact. Note that we also allocated fact's i,j,a on host, but they will not be used. Allocate them just for easy debugging. */ PetscCall(MatDuplicateNoCreate_SeqAIJ(fact, A, MAT_DO_NOT_COPY_VALUES, PETSC_TRUE /*malloc*/)); fact->offloadmask = PETSC_OFFLOAD_BOTH; fact->factortype = MAT_FACTOR_ICC; fact->info.factor_mallocs = 0; fact->info.fill_ratio_given = info->fill; fact->info.fill_ratio_needed = 1.0; aij->row = NULL; aij->col = NULL; /* ====================================================================== */ /* Copy A's i, j to fact and also allocate the value array of fact. 
*/ /* We'll do in-place factorization on fact */ /* ====================================================================== */ const int *Ai, *Aj; m = fact->rmap->n; nz = aij->nz; PetscCallCUDA(hipMalloc((void **)&fs->csrRowPtr, sizeof(int) * (m + 1))); PetscCallCUDA(hipMalloc((void **)&fs->csrColIdx, sizeof(int) * nz)); PetscCallCUDA(hipMalloc((void **)&fs->csrVal, sizeof(PetscScalar) * nz)); PetscCall(MatSeqAIJCUSPARSEGetIJ(A, PETSC_FALSE, &Ai, &Aj)); /* Do not use compressed Ai */ PetscCallCUDA(hipMemcpyAsync(fs->csrRowPtr, Ai, sizeof(int) * (m + 1), hipMemcpyDeviceToDevice, PetscDefaultCudaStream)); PetscCallCUDA(hipMemcpyAsync(fs->csrColIdx, Aj, sizeof(int) * nz, hipMemcpyDeviceToDevice, PetscDefaultCudaStream)); /* ====================================================================== */ /* Create mat descriptors for M, L */ /* ====================================================================== */ hipsparseFillMode_t fillMode; hipsparseDiagType_t diagType; PetscCallCUSPARSE(hipsparseCreateMatDescr(&fs->matDescr_M)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(fs->matDescr_M, HIPSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(hipsparseSetMatType(fs->matDescr_M, HIPSPARSE_MATRIX_TYPE_GENERAL)); /* https://docs.nvidia.com/cuda/cusparse/index.html#hipsparseDiagType_t hipsparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always assumed to be present, but if HIPSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory. */ fillMode = HIPSPARSE_FILL_MODE_LOWER; diagType = HIPSPARSE_DIAG_TYPE_NON_UNIT; PetscCallCUSPARSE(hipsparseCreateCsr(&fs->spMatDescr_L, m, m, nz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype)); PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_L, HIPSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode))); PetscCallCUSPARSE(hipsparseSpMatSetAttribute(fs->spMatDescr_L, HIPSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType))); /* ========================================================================= */ /* Query buffer sizes for csric0, SpSV of L and Lt, and allocate buffers */ /* ========================================================================= */ PetscCallCUSPARSE(hipsparseCreateCsric02Info(&fs->ic0Info_M)); if (m) PetscCallCUSPARSE(cusparseXcsric02_bufferSize(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr, fs->csrColIdx, fs->ic0Info_M, &fs->factBufferSize_M)); PetscCallCUDA(hipMalloc((void **)&fs->X, sizeof(PetscScalar) * m)); PetscCallCUDA(hipMalloc((void **)&fs->Y, sizeof(PetscScalar) * m)); PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype)); PetscCallCUSPARSE(hipsparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_L)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Lt)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, HIPSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, 
fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt, &fs->spsvBufferSize_Lt)); /* To save device memory, we make the factorization buffer share with one of the solver buffer. See also comments in MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0(). */ if (fs->spsvBufferSize_L > fs->spsvBufferSize_Lt) { PetscCallCUDA(hipMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_L, (size_t)fs->factBufferSize_M))); fs->spsvBuffer_L = fs->factBuffer_M; PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_Lt, fs->spsvBufferSize_Lt)); } else { PetscCallCUDA(hipMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_Lt, (size_t)fs->factBufferSize_M))); fs->spsvBuffer_Lt = fs->factBuffer_M; PetscCallCUDA(hipMalloc((void **)&fs->spsvBuffer_L, fs->spsvBufferSize_L)); } /* ========================================================================== */ /* Perform analysis of ic0 on M */ /* The lower triangular part of M has the same sparsity pattern as L */ /* ========================================================================== */ int structural_zero; hipsparseStatus_t status; fs->policy_M = HIPSPARSE_SOLVE_POLICY_USE_LEVEL; if (m) PetscCallCUSPARSE(cusparseXcsric02_analysis(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr, fs->csrColIdx, fs->ic0Info_M, fs->policy_M, fs->factBuffer_M)); if (PetscDefined(USE_DEBUG)) { /* Function hipsparseXcsric02_zeroPivot() is a blocking call. It calls hipDeviceSynchronize() to make sure all previous kernels are done. */ status = hipsparseXcsric02_zeroPivot(fs->handle, fs->ic0Info_M, &structural_zero); PetscCheck(HIPSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Structural zero pivot detected in csric02: A(%d,%d) is missing", structural_zero, structural_zero); } /* Estimate FLOPs of the numeric factorization */ { Mat_SeqAIJ *Aseq = (Mat_SeqAIJ *)A->data; PetscInt *Ai, nzRow, nzLeft; PetscLogDouble flops = 0.0; Ai = Aseq->i; for (PetscInt i = 0; i < m; i++) { nzRow = Ai[i + 1] - Ai[i]; if (nzRow > 1) { /* We want to eliminate nonzeros left to the diagonal one by one. Assume each time, nonzeros right and include the eliminated one will be updated, which incurs a multiplication and an addition. 
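   Since only the lower triangular half is factorized while A's sparsity pattern is symmetric, roughly half of the
   off-diagonal entries of a row lie left of the diagonal, which is where the estimate nzLeft = (nzRow - 1) / 2 below
   comes from.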
*/ nzLeft = (nzRow - 1) / 2; flops += nzLeft * (2.0 * nzRow - nzLeft + 1); } } fs->numericFactFlops = flops; } fact->ops->choleskyfactornumeric = MatICCFactorNumeric_SeqAIJCUSPARSE_ICC0; PetscFunctionReturn(PETSC_SUCCESS); } #endif static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS isrow, IS iscol, const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr; PetscFunctionBegin; #if CUSPARSE_VERSION >= 11500 PetscBool row_identity = PETSC_FALSE, col_identity = PETSC_FALSE; if (cusparseTriFactors->factorizeOnDevice) { PetscCall(ISIdentity(isrow, &row_identity)); PetscCall(ISIdentity(iscol, &col_identity)); } if (!info->levels && row_identity && col_identity) { PetscCall(MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0(B, A, isrow, iscol, info)); } else #endif { PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors)); PetscCall(MatILUFactorSymbolic_SeqAIJ(B, A, isrow, iscol, info)); B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE; } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS isrow, IS iscol, const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr; PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors)); PetscCall(MatLUFactorSymbolic_SeqAIJ(B, A, isrow, iscol, info)); B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS perm, const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr; PetscFunctionBegin; #if CUSPARSE_VERSION >= 11500 PetscBool perm_identity = PETSC_FALSE; if (cusparseTriFactors->factorizeOnDevice) PetscCall(ISIdentity(perm, &perm_identity)); if (!info->levels && perm_identity) { PetscCall(MatICCFactorSymbolic_SeqAIJCUSPARSE_ICC0(B, A, perm, info)); } else #endif { PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors)); PetscCall(MatICCFactorSymbolic_SeqAIJ(B, A, perm, info)); B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE; } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS perm, const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr; PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors)); PetscCall(MatCholeskyFactorSymbolic_SeqAIJ(B, A, perm, info)); B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE; PetscFunctionReturn(PETSC_SUCCESS); } PetscErrorCode MatFactorGetSolverType_seqaij_cusparse(Mat, MatSolverType *type) { PetscFunctionBegin; *type = MATSOLVERCUSPARSE; PetscFunctionReturn(PETSC_SUCCESS); } /*MC MATSOLVERCUSPARSE = "cusparse" - A matrix type providing triangular solvers for seq matrices on a single GPU of type `MATSEQAIJCUSPARSE`. Currently supported algorithms are ILU(k) and ICC(k). Typically, deeper factorizations (larger k) result in poorer performance in the triangular solves. Full LU and Cholesky decompositions can be solved through the CuSPARSE triangular solve algorithm. However, the performance can be quite poor and thus these algorithms are not recommended. This class does NOT support direct solver operations.
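   A typical way to select this solver at run time is `-mat_type aijcusparse -pc_type ilu -pc_factor_mat_solver_type cusparse`
   (an illustrative combination; the exact options depend on how the preconditioner is configured).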
Level: beginner .seealso: [](chapter_matrices), `Mat`, `MATSEQAIJCUSPARSE`, `PCFactorSetMatSolverType()`, `MatSolverType`, `MatCreateSeqAIJCUSPARSE()`, `MATAIJCUSPARSE`, `MatCreateAIJCUSPARSE()`, `MatCUSPARSESetFormat()`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation` M*/ PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A, MatFactorType ftype, Mat *B) { PetscInt n = A->rmap->n; PetscBool factOnDevice, factOnHost; char *prefix; char factPlace[32] = "device"; /* the default */ PetscFunctionBegin; PetscCall(MatCreate(PetscObjectComm((PetscObject)A), B)); PetscCall(MatSetSizes(*B, n, n, n, n)); (*B)->factortype = ftype; PetscCall(MatSetType(*B, MATSEQAIJCUSPARSE)); prefix = (*B)->factorprefix ? (*B)->factorprefix : ((PetscObject)A)->prefix; PetscOptionsBegin(PetscObjectComm((PetscObject)(*B)), prefix, "MatGetFactor", "Mat"); PetscCall(PetscOptionsString("-mat_factor_bind_factorization", "Do matrix factorization on host or device when possible", "MatGetFactor", NULL, factPlace, sizeof(factPlace), NULL)); PetscOptionsEnd(); PetscCall(PetscStrcasecmp("device", factPlace, &factOnDevice)); PetscCall(PetscStrcasecmp("host", factPlace, &factOnHost)); PetscCheck(factOnDevice || factOnHost, PetscObjectComm((PetscObject)(*B)), PETSC_ERR_ARG_OUTOFRANGE, "Wrong option %s to -mat_factor_bind_factorization <string>. Only host and device are allowed", factPlace); ((Mat_SeqAIJCUSPARSETriFactors *)(*B)->spptr)->factorizeOnDevice = factOnDevice; if (A->boundtocpu && A->bindingpropagates) PetscCall(MatBindToCPU(*B, PETSC_TRUE)); if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) { PetscCall(MatSetBlockSizesFromMats(*B, A, A)); if (!A->boundtocpu) { (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE; } else { (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJ; (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJ; } PetscCall(PetscStrallocpy(MATORDERINGND, (char **)&(*B)->preferredordering[MAT_FACTOR_LU])); PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ILU])); PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ILUDT])); } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) { if (!A->boundtocpu) { (*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE; } else { (*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJ; (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJ; } PetscCall(PetscStrallocpy(MATORDERINGND, (char **)&(*B)->preferredordering[MAT_FACTOR_CHOLESKY])); PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ICC])); } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Factor type not supported for CUSPARSE Matrix Types"); PetscCall(MatSeqAIJSetPreallocation(*B, MAT_SKIP_ALLOCATION, NULL)); (*B)->canuseordering = PETSC_TRUE; PetscCall(PetscObjectComposeFunction((PetscObject)(*B), "MatFactorGetSolverType_C", MatFactorGetSolverType_seqaij_cusparse)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; #if CUSPARSE_VERSION >= 13500 Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; #endif PetscFunctionBegin; if (A->offloadmask == PETSC_OFFLOAD_GPU) 
{ PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyFromGPU, A, 0, 0, 0)); if (A->factortype == MAT_FACTOR_NONE) { CsrMatrix *matrix = (CsrMatrix *)cusp->mat->mat; PetscCallCUDA(hipMemcpy(a->a, matrix->values->data().get(), a->nz * sizeof(PetscScalar), hipMemcpyDeviceToHost)); } #if CUSPARSE_VERSION >= 13500 else if (fs->csrVal) { /* We have a factorized matrix on device and are able to copy it to host */ PetscCallCUDA(hipMemcpy(a->a, fs->csrVal, a->nz * sizeof(PetscScalar), hipMemcpyDeviceToHost)); } #endif else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "No support for copying this type of factorized matrix from device to host"); PetscCall(PetscLogGpuToCpu(a->nz * sizeof(PetscScalar))); PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyFromGPU, A, 0, 0, 0)); A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJGetArray_SeqAIJCUSPARSE(Mat A, PetscScalar *array[]) { PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A)); *array = ((Mat_SeqAIJ *)A->data)->a; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJRestoreArray_SeqAIJCUSPARSE(Mat A, PetscScalar *array[]) { PetscFunctionBegin; A->offloadmask = PETSC_OFFLOAD_CPU; *array = NULL; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJGetArrayRead_SeqAIJCUSPARSE(Mat A, const PetscScalar *array[]) { PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A)); *array = ((Mat_SeqAIJ *)A->data)->a; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJRestoreArrayRead_SeqAIJCUSPARSE(Mat, const PetscScalar *array[]) { PetscFunctionBegin; *array = NULL; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJGetArrayWrite_SeqAIJCUSPARSE(Mat A, PetscScalar *array[]) { PetscFunctionBegin; *array = ((Mat_SeqAIJ *)A->data)->a; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJRestoreArrayWrite_SeqAIJCUSPARSE(Mat A, PetscScalar *array[]) { PetscFunctionBegin; A->offloadmask = PETSC_OFFLOAD_CPU; *array = NULL; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJGetCSRAndMemType_SeqAIJCUSPARSE(Mat A, const PetscInt **i, const PetscInt **j, PetscScalar **a, PetscMemType *mtype) { Mat_SeqAIJCUSPARSE *cusp; CsrMatrix *matrix; PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCheck(A->factortype == MAT_FACTOR_NONE, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONGSTATE, "Not for factored matrix"); cusp = static_cast<Mat_SeqAIJCUSPARSE *>(A->spptr); PetscCheck(cusp != NULL, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONGSTATE, "cusp is NULL"); matrix = (CsrMatrix *)cusp->mat->mat; if (i) { #if !defined(PETSC_USE_64BIT_INDICES) *i = matrix->row_offsets->data().get(); #else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSparse does not support 64-bit indices"); #endif } if (j) { #if !defined(PETSC_USE_64BIT_INDICES) *j = matrix->column_indices->data().get(); #else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSparse does not support 64-bit indices"); #endif } if (a) *a = matrix->values->data().get(); if (mtype) *mtype = PETSC_MEMTYPE_CUDA; PetscFunctionReturn(PETSC_SUCCESS); } PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct = cusparsestruct->mat; Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscInt m = A->rmap->n, *ii, *ridx, tmp; hipsparseStatus_t stat; PetscBool both = PETSC_TRUE; PetscFunctionBegin; PetscCheck(!A->boundtocpu, PETSC_COMM_SELF, PETSC_ERR_GPU, "Cannot 
copy to GPU"); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { if (A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format == MAT_CUSPARSE_CSR) { /* Copy values only */ CsrMatrix *matrix; matrix = (CsrMatrix *)cusparsestruct->mat->mat; PetscCheck(!a->nz || a->a, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR values"); PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyToGPU, A, 0, 0, 0)); matrix->values->assign(a->a, a->a + a->nz); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogCpuToGpu((a->nz) * sizeof(PetscScalar))); PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyToGPU, A, 0, 0, 0)); PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE)); } else { PetscInt nnz; PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyToGPU, A, 0, 0, 0)); PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->mat, cusparsestruct->format)); PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE)); delete cusparsestruct->workVector; delete cusparsestruct->rowoffsets_gpu; cusparsestruct->workVector = NULL; cusparsestruct->rowoffsets_gpu = NULL; try { if (a->compressedrow.use) { m = a->compressedrow.nrows; ii = a->compressedrow.i; ridx = a->compressedrow.rindex; } else { m = A->rmap->n; ii = a->i; ridx = NULL; } PetscCheck(ii, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR row data"); if (!a->a) { nnz = ii[m]; both = PETSC_FALSE; } else nnz = a->nz; PetscCheck(!nnz || a->j, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR column data"); /* create cusparse matrix */ cusparsestruct->nrows = m; matstruct = new Mat_SeqAIJCUSPARSEMultStruct; PetscCallCUSPARSE(hipsparseCreateMatDescr(&matstruct->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(matstruct->descr, HIPSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(hipsparseSetMatType(matstruct->descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); PetscCallCUDA(hipMalloc((void **)&(matstruct->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(matstruct->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(matstruct->beta_one), sizeof(PetscScalar))); PetscCallCUDA(hipMemcpy(matstruct->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(matstruct->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(matstruct->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUSPARSE(hipsparseSetPointerMode(cusparsestruct->handle, HIPSPARSE_POINTER_MODE_DEVICE)); /* Build a hybrid/ellpack matrix if this option is chosen for the storage */ if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* set the matrix */ CsrMatrix *mat = new CsrMatrix; mat->num_rows = m; mat->num_cols = A->cmap->n; mat->num_entries = nnz; mat->row_offsets = new THRUSTINTARRAY32(m + 1); mat->row_offsets->assign(ii, ii + m + 1); mat->column_indices = new THRUSTINTARRAY32(nnz); mat->column_indices->assign(a->j, a->j + nnz); mat->values = new THRUSTARRAY(nnz); if (a->a) mat->values->assign(a->a, a->a + nnz); /* assign the pointer */ matstruct->mat = mat; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) if (mat->num_rows) { /* cusparse errors on empty matrices! 
*/ stat = hipsparseCreateCsr(&matstruct->matDescr, mat->num_rows, mat->num_cols, mat->num_entries, mat->row_offsets->data().get(), mat->column_indices->data().get(), mat->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */ HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); } #endif } else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else CsrMatrix *mat = new CsrMatrix; mat->num_rows = m; mat->num_cols = A->cmap->n; mat->num_entries = nnz; mat->row_offsets = new THRUSTINTARRAY32(m + 1); mat->row_offsets->assign(ii, ii + m + 1); mat->column_indices = new THRUSTINTARRAY32(nnz); mat->column_indices->assign(a->j, a->j + nnz); mat->values = new THRUSTARRAY(nnz); if (a->a) mat->values->assign(a->a, a->a + nnz); cusparseHybMat_t hybMat; PetscCallCUSPARSE(cusparseCreateHybMat(&hybMat)); cusparseHybPartition_t partition = cusparsestruct->format == MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, mat->num_rows, mat->num_cols, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), hybMat, 0, partition); PetscCallCUSPARSE(stat); /* assign the pointer */ matstruct->mat = hybMat; if (mat) { if (mat->values) delete (THRUSTARRAY *)mat->values; if (mat->column_indices) delete (THRUSTINTARRAY32 *)mat->column_indices; if (mat->row_offsets) delete (THRUSTINTARRAY32 *)mat->row_offsets; delete (CsrMatrix *)mat; } #endif } /* assign the compressed row indices */ if (a->compressedrow.use) { cusparsestruct->workVector = new THRUSTARRAY(m); matstruct->cprowIndices = new THRUSTINTARRAY(m); matstruct->cprowIndices->assign(ridx, ridx + m); tmp = m; } else { cusparsestruct->workVector = NULL; matstruct->cprowIndices = NULL; tmp = 0; } PetscCall(PetscLogCpuToGpu(((m + 1) + (a->nz)) * sizeof(int) + tmp * sizeof(PetscInt) + (3 + (a->nz)) * sizeof(PetscScalar))); /* assign the pointer */ cusparsestruct->mat = matstruct; } catch (char *ex) { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex); } PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyToGPU, A, 0, 0, 0)); cusparsestruct->nonzerostate = A->nonzerostate; } if (both) A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(PETSC_SUCCESS); } struct VecCUDAPlusEquals { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t); } }; struct VecCUDAEquals { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<1>(t) = thrust::get<0>(t); } }; struct VecCUDAEqualsReverse { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t); } }; struct MatMatCusparse { PetscBool cisdense; PetscScalar *Bt; Mat X; PetscBool reusesym; /* Cusparse does not have split symbolic and numeric phases for sparse matmat operations */ PetscLogDouble flops; CsrMatrix *Bcsr; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) hipsparseSpMatDescr_t matSpBDescr; PetscBool initialized; /* C = alpha op(A) op(B) + beta C */ hipsparseDnMatDescr_t matBDescr; hipsparseDnMatDescr_t matCDescr; PetscInt Blda, Clda; /* Record leading dimensions of B and C here to detect changes*/ #if 
PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) void *dBuffer4; void *dBuffer5; #endif size_t mmBufferSize; void *mmBuffer; void *mmBuffer2; /* SpGEMM WorkEstimation buffer */ hipsparseSpGEMMDescr_t spgemmDesc; #endif }; static PetscErrorCode MatDestroy_MatMatCusparse(void *data) { MatMatCusparse *mmdata = (MatMatCusparse *)data; PetscFunctionBegin; PetscCallCUDA(hipFree(mmdata->Bt)); delete mmdata->Bcsr; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) if (mmdata->matSpBDescr) PetscCallCUSPARSE(hipsparseDestroySpMat(mmdata->matSpBDescr)); if (mmdata->matBDescr) PetscCallCUSPARSE(hipsparseDestroyDnMat(mmdata->matBDescr)); if (mmdata->matCDescr) PetscCallCUSPARSE(hipsparseDestroyDnMat(mmdata->matCDescr)); if (mmdata->spgemmDesc) PetscCallCUSPARSE(hipsparseSpGEMM_destroyDescr(mmdata->spgemmDesc)); #if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) if (mmdata->dBuffer4) PetscCallCUDA(hipFree(mmdata->dBuffer4)); if (mmdata->dBuffer5) PetscCallCUDA(hipFree(mmdata->dBuffer5)); #endif if (mmdata->mmBuffer) PetscCallCUDA(hipFree(mmdata->mmBuffer)); if (mmdata->mmBuffer2) PetscCallCUDA(hipFree(mmdata->mmBuffer2)); #endif PetscCall(MatDestroy(&mmdata->X)); PetscCall(PetscFree(data)); PetscFunctionReturn(PETSC_SUCCESS); } #include <../src/mat/impls/dense/seq/dense.h> // MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal() static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C) { Mat_Product *product = C->product; Mat A, B; PetscInt m, n, blda, clda; PetscBool flg, biscuda; Mat_SeqAIJCUSPARSE *cusp; hipsparseStatus_t stat; hipsparseOperation_t opA; const PetscScalar *barray; PetscScalar *carray; MatMatCusparse *mmdata; Mat_SeqAIJCUSPARSEMultStruct *mat; CsrMatrix *csrmat; PetscFunctionBegin; MatCheckProduct(C, 1); PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data empty"); mmdata = (MatMatCusparse *)product->data; A = product->A; B = product->B; PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name); /* currently CopyToGpu does not copy if the matrix is bound to CPU Instead of silently accepting the wrong answer, I prefer to raise the error */ PetscCheck(!A->boundtocpu, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases"); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; switch (product->type) { case MATPRODUCT_AB: case MATPRODUCT_PtAP: mat = cusp->mat; opA = HIPSPARSE_OPERATION_NON_TRANSPOSE; m = A->rmap->n; n = B->cmap->n; break; case MATPRODUCT_AtB: if (!A->form_explicit_transpose) { mat = cusp->mat; opA = HIPSPARSE_OPERATION_TRANSPOSE; } else { PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A)); mat = cusp->matTranspose; opA = HIPSPARSE_OPERATION_NON_TRANSPOSE; } m = A->cmap->n; n = B->cmap->n; break; case MATPRODUCT_ABt: case MATPRODUCT_RARt: mat = cusp->mat; opA = HIPSPARSE_OPERATION_NON_TRANSPOSE; m = A->rmap->n; n = B->rmap->n; break; default: SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]); } PetscCheck(mat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing Mat_SeqAIJCUSPARSEMultStruct"); csrmat = (CsrMatrix *)mat->mat; /* if the user passed a CPU matrix, copy the data to the GPU */ PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQDENSECUDA, &biscuda)); if (!biscuda) PetscCall(MatConvert(B, MATSEQDENSECUDA, 
MAT_INPLACE_MATRIX, &B)); PetscCall(MatDenseGetArrayReadAndMemType(B, &barray, nullptr)); PetscCall(MatDenseGetLDA(B, &blda)); if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) { PetscCall(MatDenseGetArrayWriteAndMemType(mmdata->X, &carray, nullptr)); PetscCall(MatDenseGetLDA(mmdata->X, &clda)); } else { PetscCall(MatDenseGetArrayWriteAndMemType(C, &carray, nullptr)); PetscCall(MatDenseGetLDA(C, &clda)); } PetscCall(PetscLogGpuTimeBegin()); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) hipsparseOperation_t opB = (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) ? HIPSPARSE_OPERATION_TRANSPOSE : HIPSPARSE_OPERATION_NON_TRANSPOSE; /* (re)allocate mmBuffer if not initialized or LDAs are different */ if (!mmdata->initialized || mmdata->Blda != blda || mmdata->Clda != clda) { size_t mmBufferSize; if (mmdata->initialized && mmdata->Blda != blda) { PetscCallCUSPARSE(hipsparseDestroyDnMat(mmdata->matBDescr)); mmdata->matBDescr = NULL; } if (!mmdata->matBDescr) { PetscCallCUSPARSE(hipsparseCreateDnMat(&mmdata->matBDescr, B->rmap->n, B->cmap->n, blda, (void *)barray, cusparse_scalartype, HIPSPARSE_ORDER_COL)); mmdata->Blda = blda; } if (mmdata->initialized && mmdata->Clda != clda) { PetscCallCUSPARSE(hipsparseDestroyDnMat(mmdata->matCDescr)); mmdata->matCDescr = NULL; } if (!mmdata->matCDescr) { /* matCDescr is for C or mmdata->X */ PetscCallCUSPARSE(hipsparseCreateDnMat(&mmdata->matCDescr, m, n, clda, (void *)carray, cusparse_scalartype, HIPSPARSE_ORDER_COL)); mmdata->Clda = clda; } if (!mat->matDescr) { stat = hipsparseCreateCsr(&mat->matDescr, csrmat->num_rows, csrmat->num_cols, csrmat->num_entries, csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(), csrmat->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */ HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); } stat = hipsparseSpMM_bufferSize(cusp->handle, opA, opB, mat->alpha_one, mat->matDescr, mmdata->matBDescr, mat->beta_zero, mmdata->matCDescr, cusparse_scalartype, cusp->spmmAlg, &mmBufferSize); PetscCallCUSPARSE(stat); if ((mmdata->mmBuffer && mmdata->mmBufferSize < mmBufferSize) || !mmdata->mmBuffer) { PetscCallCUDA(hipFree(mmdata->mmBuffer)); PetscCallCUDA(hipMalloc(&mmdata->mmBuffer, mmBufferSize)); mmdata->mmBufferSize = mmBufferSize; } mmdata->initialized = PETSC_TRUE; } else { /* to be safe, always update pointers of the mats */ PetscCallCUSPARSE(hipsparseSpMatSetValues(mat->matDescr, csrmat->values->data().get())); PetscCallCUSPARSE(hipsparseDnMatSetValues(mmdata->matBDescr, (void *)barray)); PetscCallCUSPARSE(hipsparseDnMatSetValues(mmdata->matCDescr, (void *)carray)); } /* do hipsparseSpMM, which supports transpose on B */ stat = hipsparseSpMM(cusp->handle, opA, opB, mat->alpha_one, mat->matDescr, mmdata->matBDescr, mat->beta_zero, mmdata->matCDescr, cusparse_scalartype, cusp->spmmAlg, mmdata->mmBuffer); PetscCallCUSPARSE(stat); #else PetscInt k; /* cusparseXcsrmm does not support transpose on B */ if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) { hipblasHandle_t cublasv2handle; hipblasStatus_t cerr; PetscCall(PetscCUBLASGetHandle(&cublasv2handle)); cerr = cublasXgeam(cublasv2handle, HIPBLAS_OP_T, HIPBLAS_OP_T, B->cmap->n, B->rmap->n, &PETSC_CUSPARSE_ONE, barray, blda, &PETSC_CUSPARSE_ZERO, barray, blda, mmdata->Bt, B->cmap->n); PetscCallCUBLAS(cerr); blda = B->cmap->n; k = B->cmap->n; } else { k = B->rmap->n; } /* perform the MatMat operation, op(A) is m x k, 
op(B) is k x n */ stat = cusparse_csr_spmm(cusp->handle, opA, m, n, k, csrmat->num_entries, mat->alpha_one, mat->descr, csrmat->values->data().get(), csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(), mmdata->Bt ? mmdata->Bt : barray, blda, mat->beta_zero, carray, clda); PetscCallCUSPARSE(stat); #endif PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(n * 2.0 * csrmat->num_entries)); PetscCall(MatDenseRestoreArrayReadAndMemType(B, &barray)); if (product->type == MATPRODUCT_RARt) { PetscCall(MatDenseRestoreArrayWriteAndMemType(mmdata->X, &carray)); PetscCall(MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal(B, mmdata->X, C, PETSC_FALSE, PETSC_FALSE)); } else if (product->type == MATPRODUCT_PtAP) { PetscCall(MatDenseRestoreArrayWriteAndMemType(mmdata->X, &carray)); PetscCall(MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal(B, mmdata->X, C, PETSC_TRUE, PETSC_FALSE)); } else { PetscCall(MatDenseRestoreArrayWriteAndMemType(C, &carray)); } if (mmdata->cisdense) PetscCall(MatConvert(C, MATSEQDENSE, MAT_INPLACE_MATRIX, &C)); if (!biscuda) PetscCall(MatConvert(B, MATSEQDENSE, MAT_INPLACE_MATRIX, &B)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C) { Mat_Product *product = C->product; Mat A, B; PetscInt m, n; PetscBool cisdense, flg; MatMatCusparse *mmdata; Mat_SeqAIJCUSPARSE *cusp; PetscFunctionBegin; MatCheckProduct(C, 1); PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data not empty"); A = product->A; B = product->B; PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name); cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscCheck(cusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format"); switch (product->type) { case MATPRODUCT_AB: m = A->rmap->n; n = B->cmap->n; break; case MATPRODUCT_AtB: m = A->cmap->n; n = B->cmap->n; break; case MATPRODUCT_ABt: m = A->rmap->n; n = B->rmap->n; break; case MATPRODUCT_PtAP: m = B->cmap->n; n = B->cmap->n; break; case MATPRODUCT_RARt: m = B->rmap->n; n = B->rmap->n; break; default: SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]); } PetscCall(MatSetSizes(C, m, n, m, n)); /* if C is of type MATSEQDENSE (CPU), perform the operation on the GPU and then copy on the CPU */ PetscCall(PetscObjectTypeCompare((PetscObject)C, MATSEQDENSE, &cisdense)); PetscCall(MatSetType(C, MATSEQDENSECUDA)); /* product data */ PetscCall(PetscNew(&mmdata)); mmdata->cisdense = cisdense; #if PETSC_PKG_CUDA_VERSION_LT(11, 0, 0) /* cusparseXcsrmm does not support transpose on B, so we allocate buffer to store B^T */ if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) PetscCallCUDA(hipMalloc((void **)&mmdata->Bt, (size_t)B->rmap->n * (size_t)B->cmap->n * sizeof(PetscScalar))); #endif /* for these products we need intermediate storage */ if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) { PetscCall(MatCreate(PetscObjectComm((PetscObject)C), &mmdata->X)); PetscCall(MatSetType(mmdata->X, MATSEQDENSECUDA)); if (product->type == MATPRODUCT_RARt) { /* do not preallocate, since the first call to MatDenseCUDAGetArray will preallocate on the GPU for us */ PetscCall(MatSetSizes(mmdata->X, A->rmap->n, B->rmap->n, A->rmap->n, B->rmap->n)); } else { 
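      /* MATPRODUCT_PtAP: the intermediate product X = A*P is A->rmap->n by B->cmap->n (B plays the role of P here) */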
PetscCall(MatSetSizes(mmdata->X, A->rmap->n, B->cmap->n, A->rmap->n, B->cmap->n)); } } C->product->data = mmdata; C->product->destroy = MatDestroy_MatMatCusparse; C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C) { Mat_Product *product = C->product; Mat A, B; Mat_SeqAIJCUSPARSE *Acusp, *Bcusp, *Ccusp; Mat_SeqAIJ *c = (Mat_SeqAIJ *)C->data; Mat_SeqAIJCUSPARSEMultStruct *Amat, *Bmat, *Cmat; CsrMatrix *Acsr, *Bcsr, *Ccsr; PetscBool flg; hipsparseStatus_t stat; MatProductType ptype; MatMatCusparse *mmdata; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) hipsparseSpMatDescr_t BmatSpDescr; #endif hipsparseOperation_t opA = HIPSPARSE_OPERATION_NON_TRANSPOSE, opB = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */ PetscFunctionBegin; MatCheckProduct(C, 1); PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data empty"); PetscCall(PetscObjectTypeCompare((PetscObject)C, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for C of type %s", ((PetscObject)C)->type_name); mmdata = (MatMatCusparse *)C->product->data; A = product->A; B = product->B; if (mmdata->reusesym) { /* this happens when api_user is true, meaning that the matrix values have been already computed in the MatProductSymbolic phase */ mmdata->reusesym = PETSC_FALSE; Ccusp = (Mat_SeqAIJCUSPARSE *)C->spptr; PetscCheck(Ccusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format"); Cmat = Ccusp->mat; PetscCheck(Cmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C mult struct for product type %s", MatProductTypes[C->product->type]); Ccsr = (CsrMatrix *)Cmat->mat; PetscCheck(Ccsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C CSR struct"); goto finalize; } if (!c->nz) goto finalize; PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name); PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for B of type %s", ((PetscObject)B)->type_name); PetscCheck(!A->boundtocpu, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases"); PetscCheck(!B->boundtocpu, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases"); Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr; Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr; Ccusp = (Mat_SeqAIJCUSPARSE *)C->spptr; PetscCheck(Acusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format"); PetscCheck(Bcusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format"); PetscCheck(Ccusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format"); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(B)); ptype = product->type; if (A->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_AtB) { ptype = MATPRODUCT_AB; PetscCheck(product->symbolic_used_the_fact_A_is_symmetric, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Symbolic 
should have been built using the fact that A is symmetric"); } if (B->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_ABt) { ptype = MATPRODUCT_AB; PetscCheck(product->symbolic_used_the_fact_B_is_symmetric, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Symbolic should have been built using the fact that B is symmetric"); } switch (ptype) { case MATPRODUCT_AB: Amat = Acusp->mat; Bmat = Bcusp->mat; break; case MATPRODUCT_AtB: Amat = Acusp->matTranspose; Bmat = Bcusp->mat; break; case MATPRODUCT_ABt: Amat = Acusp->mat; Bmat = Bcusp->matTranspose; break; default: SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]); } Cmat = Ccusp->mat; PetscCheck(Amat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A mult struct for product type %s", MatProductTypes[ptype]); PetscCheck(Bmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B mult struct for product type %s", MatProductTypes[ptype]); PetscCheck(Cmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C mult struct for product type %s", MatProductTypes[ptype]); Acsr = (CsrMatrix *)Amat->mat; Bcsr = mmdata->Bcsr ? mmdata->Bcsr : (CsrMatrix *)Bmat->mat; /* B may be in compressed row storage */ Ccsr = (CsrMatrix *)Cmat->mat; PetscCheck(Acsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A CSR struct"); PetscCheck(Bcsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B CSR struct"); PetscCheck(Ccsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C CSR struct"); PetscCall(PetscLogGpuTimeBegin()); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) BmatSpDescr = mmdata->Bcsr ? mmdata->matSpBDescr : Bmat->matDescr; /* B may be in compressed row storage */ PetscCallCUSPARSE(hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE)); #if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc); PetscCallCUSPARSE(stat); #else stat = hipsparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer); PetscCallCUSPARSE(stat); stat = hipsparseSpGEMM_copy(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc); PetscCallCUSPARSE(stat); #endif #else stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get()); PetscCallCUSPARSE(stat); #endif PetscCall(PetscLogGpuFlops(mmdata->flops)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogGpuTimeEnd()); C->offloadmask = PETSC_OFFLOAD_GPU; finalize: /* shorter version of MatAssemblyEnd_SeqAIJ */ PetscCall(PetscInfo(C, "Matrix size: %" PetscInt_FMT " X %" PetscInt_FMT "; storage space: 0 unneeded,%" PetscInt_FMT " used\n", C->rmap->n, C->cmap->n, c->nz)); PetscCall(PetscInfo(C, "Number of mallocs during MatSetValues() is 0\n")); 
PetscCall(PetscInfo(C, "Maximum nonzeros in any row is %" PetscInt_FMT "\n", c->rmax)); c->reallocs = 0; C->info.mallocs += 0; C->info.nz_unneeded = 0; C->assembled = C->was_assembled = PETSC_TRUE; C->num_ass++; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C) { Mat_Product *product = C->product; Mat A, B; Mat_SeqAIJCUSPARSE *Acusp, *Bcusp, *Ccusp; Mat_SeqAIJ *a, *b, *c; Mat_SeqAIJCUSPARSEMultStruct *Amat, *Bmat, *Cmat; CsrMatrix *Acsr, *Bcsr, *Ccsr; PetscInt i, j, m, n, k; PetscBool flg; hipsparseStatus_t stat; MatProductType ptype; MatMatCusparse *mmdata; PetscLogDouble flops; PetscBool biscompressed, ciscompressed; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) int64_t C_num_rows1, C_num_cols1, C_nnz1; hipsparseSpMatDescr_t BmatSpDescr; #else int cnz; #endif hipsparseOperation_t opA = HIPSPARSE_OPERATION_NON_TRANSPOSE, opB = HIPSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */ PetscFunctionBegin; MatCheckProduct(C, 1); PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data not empty"); A = product->A; B = product->B; PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name); PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for B of type %s", ((PetscObject)B)->type_name); a = (Mat_SeqAIJ *)A->data; b = (Mat_SeqAIJ *)B->data; /* product data */ PetscCall(PetscNew(&mmdata)); C->product->data = mmdata; C->product->destroy = MatDestroy_MatMatCusparse; PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(B)); Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr; /* Access spptr after MatSeqAIJCUSPARSECopyToGPU, not before */ Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr; PetscCheck(Acusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format"); PetscCheck(Bcusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format"); ptype = product->type; if (A->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_AtB) { ptype = MATPRODUCT_AB; product->symbolic_used_the_fact_A_is_symmetric = PETSC_TRUE; } if (B->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_ABt) { ptype = MATPRODUCT_AB; product->symbolic_used_the_fact_B_is_symmetric = PETSC_TRUE; } biscompressed = PETSC_FALSE; ciscompressed = PETSC_FALSE; switch (ptype) { case MATPRODUCT_AB: m = A->rmap->n; n = B->cmap->n; k = A->cmap->n; Amat = Acusp->mat; Bmat = Bcusp->mat; if (a->compressedrow.use) ciscompressed = PETSC_TRUE; if (b->compressedrow.use) biscompressed = PETSC_TRUE; break; case MATPRODUCT_AtB: m = A->cmap->n; n = B->cmap->n; k = A->rmap->n; PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A)); Amat = Acusp->matTranspose; Bmat = Bcusp->mat; if (b->compressedrow.use) biscompressed = PETSC_TRUE; break; case MATPRODUCT_ABt: m = A->rmap->n; n = B->rmap->n; k = A->cmap->n; PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(B)); Amat = Acusp->mat; Bmat = Bcusp->matTranspose; if (a->compressedrow.use) ciscompressed = PETSC_TRUE; break; default: SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]); } /* create cusparse matrix */ PetscCall(MatSetSizes(C, m, n, m, n)); PetscCall(MatSetType(C, MATSEQAIJCUSPARSE)); c = 
(Mat_SeqAIJ *)C->data; Ccusp = (Mat_SeqAIJCUSPARSE *)C->spptr; Cmat = new Mat_SeqAIJCUSPARSEMultStruct; Ccsr = new CsrMatrix; c->compressedrow.use = ciscompressed; if (c->compressedrow.use) { /* if a is in compressed row, than c will be in compressed row format */ c->compressedrow.nrows = a->compressedrow.nrows; PetscCall(PetscMalloc2(c->compressedrow.nrows + 1, &c->compressedrow.i, c->compressedrow.nrows, &c->compressedrow.rindex)); PetscCall(PetscArraycpy(c->compressedrow.rindex, a->compressedrow.rindex, c->compressedrow.nrows)); Ccusp->workVector = new THRUSTARRAY(c->compressedrow.nrows); Cmat->cprowIndices = new THRUSTINTARRAY(c->compressedrow.nrows); Cmat->cprowIndices->assign(c->compressedrow.rindex, c->compressedrow.rindex + c->compressedrow.nrows); } else { c->compressedrow.nrows = 0; c->compressedrow.i = NULL; c->compressedrow.rindex = NULL; Ccusp->workVector = NULL; Cmat->cprowIndices = NULL; } Ccusp->nrows = ciscompressed ? c->compressedrow.nrows : m; Ccusp->mat = Cmat; Ccusp->mat->mat = Ccsr; Ccsr->num_rows = Ccusp->nrows; Ccsr->num_cols = n; Ccsr->row_offsets = new THRUSTINTARRAY32(Ccusp->nrows + 1); PetscCallCUSPARSE(hipsparseCreateMatDescr(&Cmat->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(Cmat->descr, HIPSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(hipsparseSetMatType(Cmat->descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); PetscCallCUDA(hipMalloc((void **)&(Cmat->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(Cmat->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar))); PetscCallCUDA(hipMemcpy(Cmat->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(Cmat->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); if (!Ccsr->num_rows || !Ccsr->num_cols || !a->nz || !b->nz) { /* cusparse raise errors in different calls when matrices have zero rows/columns! 
*/ thrust::fill(thrust::device, Ccsr->row_offsets->begin(), Ccsr->row_offsets->end(), 0); c->nz = 0; Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); Ccsr->values = new THRUSTARRAY(c->nz); goto finalizesym; } PetscCheck(Amat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A mult struct for product type %s", MatProductTypes[ptype]); PetscCheck(Bmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B mult struct for product type %s", MatProductTypes[ptype]); Acsr = (CsrMatrix *)Amat->mat; if (!biscompressed) { Bcsr = (CsrMatrix *)Bmat->mat; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) BmatSpDescr = Bmat->matDescr; #endif } else { /* we need to use row offsets for the full matrix */ CsrMatrix *cBcsr = (CsrMatrix *)Bmat->mat; Bcsr = new CsrMatrix; Bcsr->num_rows = B->rmap->n; Bcsr->num_cols = cBcsr->num_cols; Bcsr->num_entries = cBcsr->num_entries; Bcsr->column_indices = cBcsr->column_indices; Bcsr->values = cBcsr->values; if (!Bcusp->rowoffsets_gpu) { Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1); Bcusp->rowoffsets_gpu->assign(b->i, b->i + B->rmap->n + 1); PetscCall(PetscLogCpuToGpu((B->rmap->n + 1) * sizeof(PetscInt))); } Bcsr->row_offsets = Bcusp->rowoffsets_gpu; mmdata->Bcsr = Bcsr; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) if (Bcsr->num_rows && Bcsr->num_cols) { stat = hipsparseCreateCsr(&mmdata->matSpBDescr, Bcsr->num_rows, Bcsr->num_cols, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Bcsr->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); } BmatSpDescr = mmdata->matSpBDescr; #endif } PetscCheck(Acsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A CSR struct"); PetscCheck(Bcsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B CSR struct"); /* precompute flops count */ if (ptype == MATPRODUCT_AB) { for (i = 0, flops = 0; i < A->rmap->n; i++) { const PetscInt st = a->i[i]; const PetscInt en = a->i[i + 1]; for (j = st; j < en; j++) { const PetscInt brow = a->j[j]; flops += 2. * (b->i[brow + 1] - b->i[brow]); } } } else if (ptype == MATPRODUCT_AtB) { for (i = 0, flops = 0; i < A->rmap->n; i++) { const PetscInt anzi = a->i[i + 1] - a->i[i]; const PetscInt bnzi = b->i[i + 1] - b->i[i]; flops += (2. * anzi) * bnzi; } } else { /* TODO */ flops = 0.; } mmdata->flops = flops; PetscCall(PetscLogGpuTimeBegin()); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCallCUSPARSE(hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE)); stat = hipsparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, 0, NULL, NULL, NULL, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); PetscCallCUSPARSE(hipsparseSpGEMM_createDescr(&mmdata->spgemmDesc)); #if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) { /* cusparseSpGEMMreuse has more reasonable APIs than cusparseSpGEMM, so we prefer to use it. 
We follow the sample code at https://github.com/NVIDIA/CUDALibrarySamples/blob/master/cuSPARSE/spgemm_reuse */ void *dBuffer1 = NULL; void *dBuffer2 = NULL; void *dBuffer3 = NULL; /* dBuffer4, dBuffer5 are needed by cusparseSpGEMMreuse_compute, and therefore are stored in mmdata */ size_t bufferSize1 = 0; size_t bufferSize2 = 0; size_t bufferSize3 = 0; size_t bufferSize4 = 0; size_t bufferSize5 = 0; /* ask bufferSize1 bytes for external memory */ stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize1, NULL); PetscCallCUSPARSE(stat); PetscCallCUDA(hipMalloc((void **)&dBuffer1, bufferSize1)); /* inspect the matrices A and B to understand the memory requirement for the next step */ stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize1, dBuffer1); PetscCallCUSPARSE(stat); stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize2, NULL, &bufferSize3, NULL, &bufferSize4, NULL); PetscCallCUSPARSE(stat); PetscCallCUDA(hipMalloc((void **)&dBuffer2, bufferSize2)); PetscCallCUDA(hipMalloc((void **)&dBuffer3, bufferSize3)); PetscCallCUDA(hipMalloc((void **)&mmdata->dBuffer4, bufferSize4)); stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize2, dBuffer2, &bufferSize3, dBuffer3, &bufferSize4, mmdata->dBuffer4); PetscCallCUSPARSE(stat); PetscCallCUDA(hipFree(dBuffer1)); PetscCallCUDA(hipFree(dBuffer2)); /* get matrix C non-zero entries C_nnz1 */ PetscCallCUSPARSE(hipsparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1)); c->nz = (PetscInt)C_nnz1; /* allocate matrix C */ Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */ Ccsr->values = new THRUSTARRAY(c->nz); PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */ /* update matC with the new pointers */ stat = hipsparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get()); PetscCallCUSPARSE(stat); stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize5, NULL); PetscCallCUSPARSE(stat); PetscCallCUDA(hipMalloc((void **)&mmdata->dBuffer5, bufferSize5)); stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize5, mmdata->dBuffer5); PetscCallCUSPARSE(stat); PetscCallCUDA(hipFree(dBuffer3)); stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc); PetscCallCUSPARSE(stat); PetscCall(PetscInfo(C, "Buffer sizes for type %s, result %" PetscInt_FMT " x %" PetscInt_FMT " (k %" PetscInt_FMT ", nzA %" PetscInt_FMT ", nzB %" PetscInt_FMT ", nzC %" PetscInt_FMT ") are: %ldKB %ldKB\n", MatProductTypes[ptype], m, n, k, a->nz, b->nz, c->nz, bufferSize4 / 1024, bufferSize5 / 1024)); } #else size_t bufSize2; /* ask bufferSize bytes for external memory */ stat = hipsparseSpGEMM_workEstimation(Ccusp->handle, opA, opB, 
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufSize2, NULL); PetscCallCUSPARSE(stat); PetscCallCUDA(hipMalloc((void **)&mmdata->mmBuffer2, bufSize2)); /* inspect the matrices A and B to understand the memory requirement for the next step */ stat = hipsparseSpGEMM_workEstimation(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufSize2, mmdata->mmBuffer2); PetscCallCUSPARSE(stat); /* ask bufferSize again bytes for external memory */ stat = hipsparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, NULL); PetscCallCUSPARSE(stat); /* The CUSPARSE documentation is not clear, nor the API We need both buffers to perform the operations properly! mmdata->mmBuffer2 does not appear anywhere in the compute/copy API it only appears for the workEstimation stuff, but it seems it is needed in compute, so probably the address is stored in the descriptor! What a messy API... */ PetscCallCUDA(hipMalloc((void **)&mmdata->mmBuffer, mmdata->mmBufferSize)); /* compute the intermediate product of A * B */ stat = hipsparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer); PetscCallCUSPARSE(stat); /* get matrix C non-zero entries C_nnz1 */ PetscCallCUSPARSE(hipsparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1)); c->nz = (PetscInt)C_nnz1; PetscCall(PetscInfo(C, "Buffer sizes for type %s, result %" PetscInt_FMT " x %" PetscInt_FMT " (k %" PetscInt_FMT ", nzA %" PetscInt_FMT ", nzB %" PetscInt_FMT ", nzC %" PetscInt_FMT ") are: %ldKB %ldKB\n", MatProductTypes[ptype], m, n, k, a->nz, b->nz, c->nz, bufSize2 / 1024, mmdata->mmBufferSize / 1024)); Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */ Ccsr->values = new THRUSTARRAY(c->nz); PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */ stat = hipsparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get()); PetscCallCUSPARSE(stat); stat = hipsparseSpGEMM_copy(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, HIPSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc); PetscCallCUSPARSE(stat); #endif // PETSC_PKG_CUDA_VERSION_GE(11,4,0) #else PetscCallCUSPARSE(hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_HOST)); stat = hipsparseXcsrgemmNnz(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->row_offsets->data().get(), &cnz); PetscCallCUSPARSE(stat); c->nz = cnz; Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */ Ccsr->values = new THRUSTARRAY(c->nz); PetscCallCUDA(hipPeekAtLastError()); /* catch out of memory errors */ 
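  /* For orientation (a summary sketch of the calls already made above, not additional work): the
     CUDA 11.0-11.3 branch above follows cuSPARSE's generic SpGEMM protocol,

       cusparseSpGEMM_workEstimation(...)   twice  // query buffer size, then run with the buffer
       cusparseSpGEMM_compute(...)          twice  // query mmBufferSize, then run with mmBuffer
       cusparseSpMatGetSize(Cmat->matDescr, &rows, &cols, &nnz)   // c->nz becomes known here
       ...allocate Ccsr->column_indices / Ccsr->values...
       cusparseCsrSetPointers(Cmat->matDescr, Ci, Cj, Ca)
       cusparseSpGEMM_copy(...)                    // write the result into C

     whereas this legacy (< 11.0) branch obtained the pattern with cusparseXcsrgemmNnz() above and
     computes the values with a single cusparse_csr_spgemm() call right below. */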
PetscCallCUSPARSE(hipsparseSetPointerMode(Ccusp->handle, HIPSPARSE_POINTER_MODE_DEVICE)); /* with the old gemm interface (removed from 11.0 on) we cannot compute the symbolic factorization only. I have tried using the gemm2 interface (alpha * A * B + beta * D), which allows to do symbolic by passing NULL for values, but it seems quite buggy when D is NULL, despite the fact that CUSPARSE documentation claims it is supported! */ stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get()); PetscCallCUSPARSE(stat); #endif PetscCall(PetscLogGpuFlops(mmdata->flops)); PetscCall(PetscLogGpuTimeEnd()); finalizesym: c->singlemalloc = PETSC_FALSE; c->free_a = PETSC_TRUE; c->free_ij = PETSC_TRUE; PetscCall(PetscMalloc1(m + 1, &c->i)); PetscCall(PetscMalloc1(c->nz, &c->j)); if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */ PetscInt *d_i = c->i; THRUSTINTARRAY ii(Ccsr->row_offsets->size()); THRUSTINTARRAY jj(Ccsr->column_indices->size()); ii = *Ccsr->row_offsets; jj = *Ccsr->column_indices; if (ciscompressed) d_i = c->compressedrow.i; PetscCallCUDA(hipMemcpy(d_i, ii.data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), hipMemcpyDeviceToHost)); PetscCallCUDA(hipMemcpy(c->j, jj.data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), hipMemcpyDeviceToHost)); } else { PetscInt *d_i = c->i; if (ciscompressed) d_i = c->compressedrow.i; PetscCallCUDA(hipMemcpy(d_i, Ccsr->row_offsets->data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), hipMemcpyDeviceToHost)); PetscCallCUDA(hipMemcpy(c->j, Ccsr->column_indices->data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), hipMemcpyDeviceToHost)); } if (ciscompressed) { /* need to expand host row offsets */ PetscInt r = 0; c->i[0] = 0; for (k = 0; k < c->compressedrow.nrows; k++) { const PetscInt next = c->compressedrow.rindex[k]; const PetscInt old = c->compressedrow.i[k]; for (; r < next; r++) c->i[r + 1] = old; } for (; r < m; r++) c->i[r + 1] = c->compressedrow.i[c->compressedrow.nrows]; } PetscCall(PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size()) * sizeof(PetscInt))); PetscCall(PetscMalloc1(m, &c->ilen)); PetscCall(PetscMalloc1(m, &c->imax)); c->maxnz = c->nz; c->nonzerorowcnt = 0; c->rmax = 0; for (k = 0; k < m; k++) { const PetscInt nn = c->i[k + 1] - c->i[k]; c->ilen[k] = c->imax[k] = nn; c->nonzerorowcnt += (PetscInt) !!nn; c->rmax = PetscMax(c->rmax, nn); } PetscCall(MatMarkDiagonal_SeqAIJ(C)); PetscCall(PetscMalloc1(c->nz, &c->a)); Ccsr->num_entries = c->nz; C->nonzerostate++; PetscCall(PetscLayoutSetUp(C->rmap)); PetscCall(PetscLayoutSetUp(C->cmap)); Ccusp->nonzerostate = C->nonzerostate; C->offloadmask = PETSC_OFFLOAD_UNALLOCATED; C->preallocated = PETSC_TRUE; C->assembled = PETSC_FALSE; C->was_assembled = PETSC_FALSE; if (product->api_user && A->offloadmask == PETSC_OFFLOAD_BOTH && B->offloadmask == PETSC_OFFLOAD_BOTH) { /* flag the matrix C values as computed, so that the numeric phase will only call MatAssembly */ mmdata->reusesym = PETSC_TRUE; C->offloadmask = PETSC_OFFLOAD_GPU; } C->ops->productnumeric = 
MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE; PetscFunctionReturn(PETSC_SUCCESS); } PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat); /* handles sparse or dense B */ static PetscErrorCode MatProductSetFromOptions_SeqAIJCUSPARSE(Mat mat) { Mat_Product *product = mat->product; PetscBool isdense = PETSC_FALSE, Biscusp = PETSC_FALSE, Ciscusp = PETSC_TRUE; PetscFunctionBegin; MatCheckProduct(mat, 1); PetscCall(PetscObjectBaseTypeCompare((PetscObject)product->B, MATSEQDENSE, &isdense)); if (!product->A->boundtocpu && !product->B->boundtocpu) PetscCall(PetscObjectTypeCompare((PetscObject)product->B, MATSEQAIJCUSPARSE, &Biscusp)); if (product->type == MATPRODUCT_ABC) { Ciscusp = PETSC_FALSE; if (!product->C->boundtocpu) PetscCall(PetscObjectTypeCompare((PetscObject)product->C, MATSEQAIJCUSPARSE, &Ciscusp)); } if (Biscusp && Ciscusp) { /* we can always select the CPU backend */ PetscBool usecpu = PETSC_FALSE; switch (product->type) { case MATPRODUCT_AB: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatMatMult", "Mat"); PetscCall(PetscOptionsBool("-matmatmult_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AB", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; case MATPRODUCT_AtB: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatTransposeMatMult", "Mat"); PetscCall(PetscOptionsBool("-mattransposematmult_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AtB", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; case MATPRODUCT_PtAP: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatPtAP", "Mat"); PetscCall(PetscOptionsBool("-matptap_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_PtAP", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; case MATPRODUCT_RARt: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatRARt", "Mat"); PetscCall(PetscOptionsBool("-matrart_backend_cpu", "Use CPU code", "MatRARt", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_RARt", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatRARt", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; case MATPRODUCT_ABC: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatMatMatMult", "Mat"); PetscCall(PetscOptionsBool("-matmatmatmult_backend_cpu", "Use CPU code", "MatMatMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_ABC", "Mat"); 
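  /* All of the *_backend_cpu flags handled in this switch have the same meaning: perform the chosen
     product with the CPU kernels even though both operands live on the GPU. Usage sketch (option
     names taken verbatim from the calls above/below; the executable name is hypothetical):

       ./app -matmatmult_backend_cpu               // api_user path, e.g. via MatMatMult()
       ./app -mat_product_algorithm_backend_cpu    // generic MatProduct API path

     When one of these is set, usecpu is flipped below, Biscusp/Ciscusp are cleared, and the
     dispatch falls through to MatProductSetFromOptions_SeqAIJ(). */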
PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatMatMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; default: break; } if (usecpu) Biscusp = Ciscusp = PETSC_FALSE; } /* dispatch */ if (isdense) { switch (product->type) { case MATPRODUCT_AB: case MATPRODUCT_AtB: case MATPRODUCT_ABt: case MATPRODUCT_PtAP: case MATPRODUCT_RARt: if (product->A->boundtocpu) { PetscCall(MatProductSetFromOptions_SeqAIJ_SeqDense(mat)); } else { mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA; } break; case MATPRODUCT_ABC: mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic; break; default: break; } } else if (Biscusp && Ciscusp) { switch (product->type) { case MATPRODUCT_AB: case MATPRODUCT_AtB: case MATPRODUCT_ABt: mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE; break; case MATPRODUCT_PtAP: case MATPRODUCT_RARt: case MATPRODUCT_ABC: mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic; break; default: break; } } else { /* fallback for AIJ */ PetscCall(MatProductSetFromOptions_SeqAIJ(mat)); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy) { PetscFunctionBegin; PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_FALSE, PETSC_FALSE)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz) { PetscFunctionBegin; PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_FALSE, PETSC_FALSE)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy) { PetscFunctionBegin; PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_TRUE, PETSC_TRUE)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz) { PetscFunctionBegin; PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_TRUE, PETSC_TRUE)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy) { PetscFunctionBegin; PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_TRUE, PETSC_FALSE)); PetscFunctionReturn(PETSC_SUCCESS); } __global__ static void ScatterAdd(PetscInt n, PetscInt *idx, const PetscScalar *x, PetscScalar *y) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[idx[i]] += x[i]; } /* z = op(A) x + y. 
If trans & !herm, op = ^T; if trans & herm, op = ^H; if !trans, op = no-op */ static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz, PetscBool trans, PetscBool herm) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct; PetscScalar *xarray, *zarray, *dptr, *beta, *xptr; hipsparseOperation_t opA = HIPSPARSE_OPERATION_NON_TRANSPOSE; PetscBool compressed; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscInt nx, ny; #endif PetscFunctionBegin; PetscCheck(!herm || trans, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Hermitian and not transpose not supported"); if (!a->nz) { if (yy) PetscCall(VecSeq_CUDA::Copy(yy, zz)); else PetscCall(VecSeq_CUDA::Set(zz, 0)); PetscFunctionReturn(PETSC_SUCCESS); } /* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */ PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); if (!trans) { matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat; PetscCheck(matstruct, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "SeqAIJCUSPARSE does not have a 'mat' (need to fix)"); } else { if (herm || !A->form_explicit_transpose) { opA = herm ? HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE : HIPSPARSE_OPERATION_TRANSPOSE; matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat; } else { if (!cusparsestruct->matTranspose) PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A)); matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->matTranspose; } } /* Does the matrix use compressed rows (i.e., drop zero rows)? */ compressed = matstruct->cprowIndices ? PETSC_TRUE : PETSC_FALSE; try { PetscCall(VecCUDAGetArrayRead(xx, (const PetscScalar **)&xarray)); if (yy == zz) PetscCall(VecCUDAGetArray(zz, &zarray)); /* read & write zz, so need to get up-to-date zarray on GPU */ else PetscCall(VecCUDAGetArrayWrite(zz, &zarray)); /* write zz, so no need to init zarray on GPU */ PetscCall(PetscLogGpuTimeBegin()); if (opA == HIPSPARSE_OPERATION_NON_TRANSPOSE) { /* z = A x + beta y. If A is compressed (with less rows), then Ax is shorter than the full z, so we need a work vector to store Ax. When A is non-compressed, and z = y, we can set beta=1 to compute y = Ax + y in one call. */ xptr = xarray; dptr = compressed ? cusparsestruct->workVector->data().get() : zarray; beta = (yy == zz && !compressed) ? matstruct->beta_one : matstruct->beta_zero; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) /* Get length of x, y for y=Ax. ny might be shorter than the work vector's allocated length, since the work vector is allocated to accommodate different uses. So we get the length info directly from mat. */ if (cusparsestruct->format == MAT_CUSPARSE_CSR) { CsrMatrix *mat = (CsrMatrix *)matstruct->mat; nx = mat->num_cols; ny = mat->num_rows; } #endif } else { /* z = A^T x + beta y If A is compressed, then we need a work vector as the shorter version of x to compute A^T x. Note A^Tx is of full length, so we set beta to 1.0 if y exists. */ xptr = compressed ? cusparsestruct->workVector->data().get() : xarray; dptr = zarray; beta = yy ? 
matstruct->beta_one : matstruct->beta_zero; if (compressed) { /* Scatter x to work vector */ thrust::device_ptr<PetscScalar> xarr = thrust::device_pointer_cast(xarray); thrust::for_each( #if PetscDefined(HAVE_THRUST_ASYNC) thrust::hip::par.on(PetscDefaultCudaStream), #endif thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(), VecCUDAEqualsReverse()); } #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) if (cusparsestruct->format == MAT_CUSPARSE_CSR) { CsrMatrix *mat = (CsrMatrix *)matstruct->mat; nx = mat->num_rows; ny = mat->num_cols; } #endif } /* csr_spmv does y = alpha op(A) x + beta y */ if (cusparsestruct->format == MAT_CUSPARSE_CSR) { #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCheck(opA >= 0 && opA <= 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE ABI on hipsparseOperation_t has changed and PETSc has not been updated accordingly"); if (!matstruct->cuSpMV[opA].initialized) { /* built on demand */ PetscCallCUSPARSE(hipsparseCreateDnVec(&matstruct->cuSpMV[opA].vecXDescr, nx, xptr, cusparse_scalartype)); PetscCallCUSPARSE(hipsparseCreateDnVec(&matstruct->cuSpMV[opA].vecYDescr, ny, dptr, cusparse_scalartype)); PetscCallCUSPARSE( hipsparseSpMV_bufferSize(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, &matstruct->cuSpMV[opA].spmvBufferSize)); PetscCallCUDA(hipMalloc(&matstruct->cuSpMV[opA].spmvBuffer, matstruct->cuSpMV[opA].spmvBufferSize)); matstruct->cuSpMV[opA].initialized = PETSC_TRUE; } else { /* x, y's value pointers might change between calls, but their shape is kept, so we just update pointers */ PetscCallCUSPARSE(hipsparseDnVecSetValues(matstruct->cuSpMV[opA].vecXDescr, xptr)); PetscCallCUSPARSE(hipsparseDnVecSetValues(matstruct->cuSpMV[opA].vecYDescr, dptr)); } PetscCallCUSPARSE(hipsparseSpMV(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, /* built in MatSeqAIJCUSPARSECopyToGPU() or MatSeqAIJCUSPARSEFormExplicitTranspose() */ matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, matstruct->cuSpMV[opA].spmvBuffer)); #else CsrMatrix *mat = (CsrMatrix *)matstruct->mat; PetscCallCUSPARSE(cusparse_csr_spmv(cusparsestruct->handle, opA, mat->num_rows, mat->num_cols, mat->num_entries, matstruct->alpha_one, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), xptr, beta, dptr)); #endif } else { if (cusparsestruct->nrows) { #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat; PetscCallCUSPARSE(cusparse_hyb_spmv(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->descr, hybMat, xptr, beta, dptr)); #endif } } PetscCall(PetscLogGpuTimeEnd()); if (opA == HIPSPARSE_OPERATION_NON_TRANSPOSE) { if (yy) { /* MatMultAdd: zz = A*xx + yy */ if (compressed) { /* A is compressed. We first copy yy to zz, then ScatterAdd the work vector to zz */ PetscCall(VecSeq_CUDA::Copy(yy, zz)); /* zz = yy */ } else if (zz != yy) { /* A is not compressed. 
zz already contains A*xx, and we just need to add yy */ PetscCall(VecSeq_CUDA::AXPY(zz, 1.0, yy)); /* zz += yy */ } } else if (compressed) { /* MatMult: zz = A*xx. A is compressed, so we zero zz first, then ScatterAdd the work vector to zz */ PetscCall(VecSeq_CUDA::Set(zz, 0)); } /* ScatterAdd the result from work vector into the full vector when A is compressed */ if (compressed) { PetscCall(PetscLogGpuTimeBegin()); /* I wanted to make this for_each asynchronous but failed. thrust::async::for_each() returns an event (internally registered) and in the destructor of the scope, it will call hipStreamSynchronize() on this stream. One has to store all events to prevent that. So I just add a ScatterAdd kernel. */ #if 0 thrust::device_ptr<PetscScalar> zptr = thrust::device_pointer_cast(zarray); thrust::async::for_each(thrust::hip::par.on(cusparsestruct->stream), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(), VecCUDAPlusEquals()); #else PetscInt n = matstruct->cprowIndices->size(); hipLaunchKernelGGL(( ScatterAdd), dim3((n + 255) / 256), dim3(256), 0, PetscDefaultCudaStream, n, matstruct->cprowIndices->data().get(), cusparsestruct->workVector->data().get(), zarray); #endif PetscCall(PetscLogGpuTimeEnd()); } } else { if (yy && yy != zz) PetscCall(VecSeq_CUDA::AXPY(zz, 1.0, yy)); /* zz += yy */ } PetscCall(VecCUDARestoreArrayRead(xx, (const PetscScalar **)&xarray)); if (yy == zz) PetscCall(VecCUDARestoreArray(zz, &zarray)); else PetscCall(VecCUDARestoreArrayWrite(zz, &zarray)); } catch (char *ex) { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex); } if (yy) { PetscCall(PetscLogGpuFlops(2.0 * a->nz)); } else { PetscCall(PetscLogGpuFlops(2.0 * a->nz - a->nonzerorowcnt)); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz) { PetscFunctionBegin; PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_TRUE, PETSC_FALSE)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A, MatAssemblyType mode) { PetscObjectState onnz = A->nonzerostate; Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscFunctionBegin; PetscCall(MatAssemblyEnd_SeqAIJ(A, mode)); if (onnz != A->nonzerostate && cusp->deviceMat) { PetscCall(PetscInfo(A, "Destroy device mat since nonzerostate changed\n")); PetscCallCUDA(hipFree(cusp->deviceMat)); cusp->deviceMat = NULL; } PetscFunctionReturn(PETSC_SUCCESS); } /*@ MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in `MATAIJCUSPARSE` (compressed row) format (the default parallel PETSc format). This matrix will ultimately pushed down to NVIDIA GPUs and use the CuSPARSE library for calculations. For good matrix assembly performance the user should preallocate the matrix storage by setting the parameter nz (or the array nnz). By setting these parameters accurately, performance during matrix assembly can be increased by more than a factor of 50. Collective Input Parameters: + comm - MPI communicator, set to `PETSC_COMM_SELF` . m - number of rows . n - number of columns . 
nz - number of nonzeros per row (same for all rows) - nnz - array containing the number of nonzeros in the various rows (possibly different for each row) or `NULL` Output Parameter: . A - the matrix Level: intermediate Notes: It is recommended that one use the `MatCreate()`, `MatSetType()` and/or `MatSetFromOptions()`, MatXXXXSetPreallocation() paradgm instead of this routine directly. [MatXXXXSetPreallocation() is, for example, `MatSeqAIJSetPreallocation()`] If `nnz` is given then `nz` is ignored The AIJ format, also called compressed row storage, is fully compatible with standard Fortran storage. That is, the stored row and column indices can begin at either one (as in Fortran) or zero. See the users' manual for details. Specify the preallocated storage with either nz or nnz (not both). Set `nz` = `PETSC_DEFAULT` and `nnz` = `NULL` for PETSc to control dynamic memory allocation. For large problems you MUST preallocate memory or you will get TERRIBLE performance, see the users' manual chapter on matrices. By default, this format uses inodes (identical nodes) when possible, to improve numerical efficiency of matrix-vector products and solves. We search for consecutive rows with the same nonzero structure, thereby reusing matrix information to achieve increased efficiency. .seealso: [](chapter_matrices), `Mat`, `MATSEQAIJCUSPARSE`, `MatCreate()`, `MatCreateAIJ()`, `MatSetValues()`, `MatSeqAIJSetColumnIndices()`, `MatCreateSeqAIJWithArrays()`, `MatCreateAIJ()`, `MATSEQAIJCUSPARSE`, `MATAIJCUSPARSE` @*/ PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt nz, const PetscInt nnz[], Mat *A) { PetscFunctionBegin; PetscCall(MatCreate(comm, A)); PetscCall(MatSetSizes(*A, m, n, m, n)); PetscCall(MatSetType(*A, MATSEQAIJCUSPARSE)); PetscCall(MatSeqAIJSetPreallocation_SeqAIJ(*A, nz, (PetscInt *)nnz)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A) { PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) { PetscCall(MatSeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE **)&A->spptr)); } else { PetscCall(MatSeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors **)&A->spptr)); } PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatCUSPARSESetFormat_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatCUSPARSESetUseCPUSolve_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatFactorGetSolverType_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatConvert_seqaijcusparse_hypre_C", NULL)); PetscCall(MatDestroy_SeqAIJ(A)); PetscFunctionReturn(PETSC_SUCCESS); } PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat, MatType, MatReuse, Mat *); static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat, PetscBool); static PetscErrorCode MatDuplicate_SeqAIJCUSPARSE(Mat A, MatDuplicateOption cpvalues, Mat *B) { PetscFunctionBegin; PetscCall(MatDuplicate_SeqAIJ(A, 
cpvalues, B)); PetscCall(MatConvert_SeqAIJ_SeqAIJCUSPARSE(*B, MATSEQAIJCUSPARSE, MAT_INPLACE_MATRIX, B)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat Y, PetscScalar a, Mat X, MatStructure str) { Mat_SeqAIJ *x = (Mat_SeqAIJ *)X->data, *y = (Mat_SeqAIJ *)Y->data; Mat_SeqAIJCUSPARSE *cy; Mat_SeqAIJCUSPARSE *cx; PetscScalar *ay; const PetscScalar *ax; CsrMatrix *csry, *csrx; PetscFunctionBegin; cy = (Mat_SeqAIJCUSPARSE *)Y->spptr; cx = (Mat_SeqAIJCUSPARSE *)X->spptr; if (X->ops->axpy != Y->ops->axpy) { PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(Y, PETSC_FALSE)); PetscCall(MatAXPY_SeqAIJ(Y, a, X, str)); PetscFunctionReturn(PETSC_SUCCESS); } /* if we are here, it means both matrices are bound to GPU */ PetscCall(MatSeqAIJCUSPARSECopyToGPU(Y)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(X)); PetscCheck(cy->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)Y), PETSC_ERR_GPU, "only MAT_CUSPARSE_CSR supported"); PetscCheck(cx->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)X), PETSC_ERR_GPU, "only MAT_CUSPARSE_CSR supported"); csry = (CsrMatrix *)cy->mat->mat; csrx = (CsrMatrix *)cx->mat->mat; /* see if we can turn this into a cublas axpy */ if (str != SAME_NONZERO_PATTERN && x->nz == y->nz && !x->compressedrow.use && !y->compressedrow.use) { bool eq = thrust::equal(thrust::device, csry->row_offsets->begin(), csry->row_offsets->end(), csrx->row_offsets->begin()); if (eq) eq = thrust::equal(thrust::device, csry->column_indices->begin(), csry->column_indices->end(), csrx->column_indices->begin()); if (eq) str = SAME_NONZERO_PATTERN; } /* spgeam is buggy with one column */ if (Y->cmap->n == 1 && str != SAME_NONZERO_PATTERN) str = DIFFERENT_NONZERO_PATTERN; if (str == SUBSET_NONZERO_PATTERN) { PetscScalar b = 1.0; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) size_t bufferSize; void *buffer; #endif PetscCall(MatSeqAIJCUSPARSEGetArrayRead(X, &ax)); PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay)); PetscCallCUSPARSE(hipsparseSetPointerMode(cy->handle, HIPSPARSE_POINTER_MODE_HOST)); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCallCUSPARSE(cusparse_csr_spgeam_bufferSize(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), &bufferSize)); PetscCallCUDA(hipMalloc(&buffer, bufferSize)); PetscCall(PetscLogGpuTimeBegin()); PetscCallCUSPARSE(cusparse_csr_spgeam(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), buffer)); PetscCall(PetscLogGpuFlops(x->nz + y->nz)); PetscCall(PetscLogGpuTimeEnd()); PetscCallCUDA(hipFree(buffer)); #else PetscCall(PetscLogGpuTimeBegin()); PetscCallCUSPARSE(cusparse_csr_spgeam(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get())); PetscCall(PetscLogGpuFlops(x->nz + y->nz)); PetscCall(PetscLogGpuTimeEnd()); #endif 
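  /* Summary of the branch above (no extra work is done here): cuSPARSE csrgeam computes the general
     sparse sum

         C = alpha*A + beta*B     with alpha = a, beta = 1.0, and both B and C aliased to Y,

     so for SUBSET_NONZERO_PATTERN the update Y += a*X is written straight back into Y's existing
     CSR arrays. The SAME_NONZERO_PATTERN case below skips csrgeam entirely and reduces to a single
     cublasXaxpy() on the value arrays; any other pattern falls back to MatAXPY_SeqAIJ() on the host. */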
PetscCallCUSPARSE(hipsparseSetPointerMode(cy->handle, HIPSPARSE_POINTER_MODE_DEVICE)); PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(X, &ax)); PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay)); PetscCall(MatSeqAIJInvalidateDiagonal(Y)); } else if (str == SAME_NONZERO_PATTERN) { hipblasHandle_t cublasv2handle; PetscBLASInt one = 1, bnz = 1; PetscCall(MatSeqAIJCUSPARSEGetArrayRead(X, &ax)); PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay)); PetscCall(PetscCUBLASGetHandle(&cublasv2handle)); PetscCall(PetscBLASIntCast(x->nz, &bnz)); PetscCall(PetscLogGpuTimeBegin()); PetscCallCUBLAS(cublasXaxpy(cublasv2handle, bnz, &a, ax, one, ay, one)); PetscCall(PetscLogGpuFlops(2.0 * bnz)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(X, &ax)); PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay)); PetscCall(MatSeqAIJInvalidateDiagonal(Y)); } else { PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(Y, PETSC_FALSE)); PetscCall(MatAXPY_SeqAIJ(Y, a, X, str)); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat Y, PetscScalar a) { Mat_SeqAIJ *y = (Mat_SeqAIJ *)Y->data; PetscScalar *ay; hipblasHandle_t cublasv2handle; PetscBLASInt one = 1, bnz = 1; PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay)); PetscCall(PetscCUBLASGetHandle(&cublasv2handle)); PetscCall(PetscBLASIntCast(y->nz, &bnz)); PetscCall(PetscLogGpuTimeBegin()); PetscCallCUBLAS(cublasXscal(cublasv2handle, bnz, &a, ay, one)); PetscCall(PetscLogGpuFlops(bnz)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay)); PetscCall(MatSeqAIJInvalidateDiagonal(Y)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatZeroEntries_SeqAIJCUSPARSE(Mat A) { PetscBool both = PETSC_FALSE; Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) { Mat_SeqAIJCUSPARSE *spptr = (Mat_SeqAIJCUSPARSE *)A->spptr; if (spptr->mat) { CsrMatrix *matrix = (CsrMatrix *)spptr->mat->mat; if (matrix->values) { both = PETSC_TRUE; thrust::fill(thrust::device, matrix->values->begin(), matrix->values->end(), 0.); } } if (spptr->matTranspose) { CsrMatrix *matrix = (CsrMatrix *)spptr->matTranspose->mat; if (matrix->values) thrust::fill(thrust::device, matrix->values->begin(), matrix->values->end(), 0.); } } PetscCall(PetscArrayzero(a->a, a->i[A->rmap->n])); PetscCall(MatSeqAIJInvalidateDiagonal(A)); if (both) A->offloadmask = PETSC_OFFLOAD_BOTH; else A->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat A, PetscBool flg) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscFunctionBegin; if (A->factortype != MAT_FACTOR_NONE) { A->boundtocpu = flg; PetscFunctionReturn(PETSC_SUCCESS); } if (flg) { PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A)); A->ops->scale = MatScale_SeqAIJ; A->ops->axpy = MatAXPY_SeqAIJ; A->ops->zeroentries = MatZeroEntries_SeqAIJ; A->ops->mult = MatMult_SeqAIJ; A->ops->multadd = MatMultAdd_SeqAIJ; A->ops->multtranspose = MatMultTranspose_SeqAIJ; A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJ; A->ops->multhermitiantranspose = NULL; A->ops->multhermitiantransposeadd = NULL; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJ; PetscCall(PetscMemzero(a->ops, sizeof(Mat_SeqAIJOps))); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, 
"MatProductSetFromOptions_seqaijcusparse_seqdense_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", NULL)); } else { A->ops->scale = MatScale_SeqAIJCUSPARSE; A->ops->axpy = MatAXPY_SeqAIJCUSPARSE; A->ops->zeroentries = MatZeroEntries_SeqAIJCUSPARSE; A->ops->mult = MatMult_SeqAIJCUSPARSE; A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE; A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE; A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE; A->ops->multhermitiantranspose = MatMultHermitianTranspose_SeqAIJCUSPARSE; A->ops->multhermitiantransposeadd = MatMultHermitianTransposeAdd_SeqAIJCUSPARSE; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJCUSPARSE; a->ops->getarray = MatSeqAIJGetArray_SeqAIJCUSPARSE; a->ops->restorearray = MatSeqAIJRestoreArray_SeqAIJCUSPARSE; a->ops->getarrayread = MatSeqAIJGetArrayRead_SeqAIJCUSPARSE; a->ops->restorearrayread = MatSeqAIJRestoreArrayRead_SeqAIJCUSPARSE; a->ops->getarraywrite = MatSeqAIJGetArrayWrite_SeqAIJCUSPARSE; a->ops->restorearraywrite = MatSeqAIJRestoreArrayWrite_SeqAIJCUSPARSE; a->ops->getcsrandmemtype = MatSeqAIJGetCSRAndMemType_SeqAIJCUSPARSE; PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", MatSeqAIJCopySubArray_SeqAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", MatProductSetFromOptions_SeqAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", MatProductSetFromOptions_SeqAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", MatSetPreallocationCOO_SeqAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", MatSetValuesCOO_SeqAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", MatProductSetFromOptions_SeqAIJCUSPARSE)); } A->boundtocpu = flg; if (flg && a->inode.size) { a->inode.use = PETSC_TRUE; } else { a->inode.use = PETSC_FALSE; } PetscFunctionReturn(PETSC_SUCCESS); } PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat A, MatType, MatReuse reuse, Mat *newmat) { Mat B; PetscFunctionBegin; PetscCall(PetscDeviceInitialize(PETSC_DEVICE_CUDA)); /* first use of CUSPARSE may be via MatConvert */ if (reuse == MAT_INITIAL_MATRIX) { PetscCall(MatDuplicate(A, MAT_COPY_VALUES, newmat)); } else if (reuse == MAT_REUSE_MATRIX) { PetscCall(MatCopy(A, *newmat, SAME_NONZERO_PATTERN)); } B = *newmat; PetscCall(PetscFree(B->defaultvectype)); PetscCall(PetscStrallocpy(VECCUDA, &B->defaultvectype)); if (reuse != MAT_REUSE_MATRIX && !B->spptr) { if (B->factortype == MAT_FACTOR_NONE) { Mat_SeqAIJCUSPARSE *spptr; PetscCall(PetscNew(&spptr)); PetscCallCUSPARSE(hipsparseCreate(&spptr->handle)); PetscCallCUSPARSE(hipsparseSetStream(spptr->handle, PetscDefaultCudaStream)); spptr->format = MAT_CUSPARSE_CSR; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) #if CUSPARSE_VERSION > 11301 spptr->spmvAlg = CUSPARSE_SPMV_CSR_ALG1; /* default, since we only support csr */ #else spptr->spmvAlg = HIPSPARSE_CSRMV_ALG1; /* default, since we only support csr */ #endif spptr->spmmAlg = HIPSPARSE_CSRMM_ALG1; /* default, only support column-major dense matrix B */ spptr->csr2cscAlg = 
HIPSPARSE_CSR2CSC_ALG1; #endif B->spptr = spptr; } else { Mat_SeqAIJCUSPARSETriFactors *spptr; PetscCall(PetscNew(&spptr)); PetscCallCUSPARSE(hipsparseCreate(&spptr->handle)); PetscCallCUSPARSE(hipsparseSetStream(spptr->handle, PetscDefaultCudaStream)); B->spptr = spptr; } B->offloadmask = PETSC_OFFLOAD_UNALLOCATED; } B->ops->assemblyend = MatAssemblyEnd_SeqAIJCUSPARSE; B->ops->destroy = MatDestroy_SeqAIJCUSPARSE; B->ops->setoption = MatSetOption_SeqAIJCUSPARSE; B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE; B->ops->bindtocpu = MatBindToCPU_SeqAIJCUSPARSE; B->ops->duplicate = MatDuplicate_SeqAIJCUSPARSE; PetscCall(MatBindToCPU_SeqAIJCUSPARSE(B, PETSC_FALSE)); PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATSEQAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_SeqAIJCUSPARSE)); #if defined(PETSC_HAVE_HYPRE) PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_seqaijcusparse_hypre_C", MatConvert_AIJ_HYPRE)); #endif PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetUseCPUSolve_C", MatCUSPARSESetUseCPUSolve_SeqAIJCUSPARSE)); PetscFunctionReturn(PETSC_SUCCESS); } PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B) { PetscFunctionBegin; PetscCall(MatCreate_SeqAIJ(B)); PetscCall(MatConvert_SeqAIJ_SeqAIJCUSPARSE(B, MATSEQAIJCUSPARSE, MAT_INPLACE_MATRIX, &B)); PetscFunctionReturn(PETSC_SUCCESS); } /*MC MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices. A matrix type type whose data resides on NVIDIA GPUs. These matrices can be in either CSR, ELL, or Hybrid format. All matrix calculations are performed on NVIDIA GPUs using the CuSPARSE library. Options Database Keys: + -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to `MatSetFromOptions()` . -mat_cusparse_storage_format csr - sets the storage format of matrices (for `MatMult()` and factors in `MatSolve()`). Other options include ell (ellpack) or hyb (hybrid). . -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for `MatMult()`). Other options include ell (ellpack) or hyb (hybrid). 
- -mat_cusparse_use_cpu_solve - Do `MatSolve()` on CPU Level: beginner .seealso: [](chapter_matrices), `Mat`, `MatCreateSeqAIJCUSPARSE()`, `MatCUSPARSESetUseCPUSolve()`, `MATAIJCUSPARSE`, `MatCreateAIJCUSPARSE()`, `MatCUSPARSESetFormat()`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation` M*/ PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat, MatFactorType, Mat *); PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_CUSPARSE(void) { PetscFunctionBegin; PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSEBAND, MATSEQAIJ, MAT_FACTOR_LU, MatGetFactor_seqaijcusparse_cusparse_band)); PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_LU, MatGetFactor_seqaijcusparse_cusparse)); PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_CHOLESKY, MatGetFactor_seqaijcusparse_cusparse)); PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_ILU, MatGetFactor_seqaijcusparse_cusparse)); PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_ICC, MatGetFactor_seqaijcusparse_cusparse)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatResetPreallocationCOO_SeqAIJCUSPARSE(Mat mat) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)mat->spptr; PetscFunctionBegin; if (!cusp) PetscFunctionReturn(PETSC_SUCCESS); delete cusp->cooPerm; delete cusp->cooPerm_a; cusp->cooPerm = NULL; cusp->cooPerm_a = NULL; if (cusp->use_extended_coo) { PetscCallCUDA(hipFree(cusp->jmap_d)); PetscCallCUDA(hipFree(cusp->perm_d)); } cusp->use_extended_coo = PETSC_FALSE; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct) { PetscFunctionBegin; if (*cusparsestruct) { PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat, (*cusparsestruct)->format)); PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose, (*cusparsestruct)->format)); delete (*cusparsestruct)->workVector; delete (*cusparsestruct)->rowoffsets_gpu; delete (*cusparsestruct)->cooPerm; delete (*cusparsestruct)->cooPerm_a; delete (*cusparsestruct)->csr2csc_i; if ((*cusparsestruct)->handle) PetscCallCUSPARSE(hipsparseDestroy((*cusparsestruct)->handle)); if ((*cusparsestruct)->jmap_d) PetscCallCUDA(hipFree((*cusparsestruct)->jmap_d)); if ((*cusparsestruct)->perm_d) PetscCallCUDA(hipFree((*cusparsestruct)->perm_d)); PetscCall(PetscFree(*cusparsestruct)); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat) { PetscFunctionBegin; if (*mat) { delete (*mat)->values; delete (*mat)->column_indices; delete (*mat)->row_offsets; delete *mat; *mat = 0; } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor) { PetscFunctionBegin; if (*trifactor) { if ((*trifactor)->descr) PetscCallCUSPARSE(hipsparseDestroyMatDescr((*trifactor)->descr)); if ((*trifactor)->solveInfo) PetscCallCUSPARSE(cusparseDestroyCsrsvInfo((*trifactor)->solveInfo)); PetscCall(CsrMatrix_Destroy(&(*trifactor)->csrMat)); if ((*trifactor)->solveBuffer) PetscCallCUDA(hipFree((*trifactor)->solveBuffer)); if ((*trifactor)->AA_h) PetscCallCUDA(hipHostFree((*trifactor)->AA_h)); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) if ((*trifactor)->csr2cscBuffer) PetscCallCUDA(hipFree((*trifactor)->csr2cscBuffer)); #endif PetscCall(PetscFree(*trifactor)); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode 
MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct, MatCUSPARSEStorageFormat format) { CsrMatrix *mat; PetscFunctionBegin; if (*matstruct) { if ((*matstruct)->mat) { if (format == MAT_CUSPARSE_ELL || format == MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat; PetscCallCUSPARSE(cusparseDestroyHybMat(hybMat)); #endif } else { mat = (CsrMatrix *)(*matstruct)->mat; PetscCall(CsrMatrix_Destroy(&mat)); } } if ((*matstruct)->descr) PetscCallCUSPARSE(hipsparseDestroyMatDescr((*matstruct)->descr)); delete (*matstruct)->cprowIndices; if ((*matstruct)->alpha_one) PetscCallCUDA(hipFree((*matstruct)->alpha_one)); if ((*matstruct)->beta_zero) PetscCallCUDA(hipFree((*matstruct)->beta_zero)); if ((*matstruct)->beta_one) PetscCallCUDA(hipFree((*matstruct)->beta_one)); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) Mat_SeqAIJCUSPARSEMultStruct *mdata = *matstruct; if (mdata->matDescr) PetscCallCUSPARSE(hipsparseDestroySpMat(mdata->matDescr)); for (int i = 0; i < 3; i++) { if (mdata->cuSpMV[i].initialized) { PetscCallCUDA(hipFree(mdata->cuSpMV[i].spmvBuffer)); PetscCallCUSPARSE(hipsparseDestroyDnVec(mdata->cuSpMV[i].vecXDescr)); PetscCallCUSPARSE(hipsparseDestroyDnVec(mdata->cuSpMV[i].vecYDescr)); } } #endif delete *matstruct; *matstruct = NULL; } PetscFunctionReturn(PETSC_SUCCESS); } PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p *trifactors) { Mat_SeqAIJCUSPARSETriFactors *fs = *trifactors; PetscFunctionBegin; if (fs) { PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->loTriFactorPtr)); PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->upTriFactorPtr)); PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->loTriFactorPtrTranspose)); PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->upTriFactorPtrTranspose)); delete fs->rpermIndices; delete fs->cpermIndices; delete fs->workVector; fs->rpermIndices = NULL; fs->cpermIndices = NULL; fs->workVector = NULL; if (fs->a_band_d) PetscCallCUDA(hipFree(fs->a_band_d)); if (fs->i_band_d) PetscCallCUDA(hipFree(fs->i_band_d)); fs->init_dev_prop = PETSC_FALSE; #if CUSPARSE_VERSION >= 11500 PetscCallCUDA(hipFree(fs->csrRowPtr)); PetscCallCUDA(hipFree(fs->csrColIdx)); PetscCallCUDA(hipFree(fs->csrVal)); PetscCallCUDA(hipFree(fs->X)); PetscCallCUDA(hipFree(fs->Y)); // PetscCallCUDA(hipFree(fs->factBuffer_M)); /* No needed since factBuffer_M shares with one of spsvBuffer_L/U */ PetscCallCUDA(hipFree(fs->spsvBuffer_L)); PetscCallCUDA(hipFree(fs->spsvBuffer_U)); PetscCallCUDA(hipFree(fs->spsvBuffer_Lt)); PetscCallCUDA(hipFree(fs->spsvBuffer_Ut)); PetscCallCUSPARSE(hipsparseDestroyMatDescr(fs->matDescr_M)); PetscCallCUSPARSE(hipsparseDestroySpMat(fs->spMatDescr_L)); PetscCallCUSPARSE(hipsparseDestroySpMat(fs->spMatDescr_U)); PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_L)); PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_Lt)); PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_U)); PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_Ut)); PetscCallCUSPARSE(hipsparseDestroyDnVec(fs->dnVecDescr_X)); PetscCallCUSPARSE(hipsparseDestroyDnVec(fs->dnVecDescr_Y)); PetscCallCUSPARSE(hipsparseDestroyCsrilu02Info(fs->ilu0Info_M)); PetscCallCUSPARSE(hipsparseDestroyCsric02Info(fs->ic0Info_M)); fs->createdTransposeSpSVDescr = PETSC_FALSE; fs->updatedTransposeSpSVAnalysis = PETSC_FALSE; #endif } 
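  /* The members released above mirror the generic SpSV setup performed by the CUSPARSE_VERSION >= 11500
     ILU(0)/IC(0) code paths (a sketch of that protocol for orientation only; the exact arguments live
     in the factorization routines of this file):

       cusparseSpSV_createDescr(&fs->spsvDescr_L);
       cusparseSpSV_bufferSize(handle, op, &alpha, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y,
                               cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &sz);
       cudaMalloc(&fs->spsvBuffer_L, sz);
       cusparseSpSV_analysis(...);   // once per sparsity pattern
       cusparseSpSV_solve(...);      // every triangular solve

     hence _Reset() frees both the descriptors and their attached device buffers, while
     MatSeqAIJCUSPARSETriFactors_Destroy() below additionally destroys the cusparse handle and the
     container itself. */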
PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors **trifactors) { PetscFunctionBegin; if (*trifactors) { PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(trifactors)); PetscCallCUSPARSE(hipsparseDestroy((*trifactors)->handle)); PetscCall(PetscFree(*trifactors)); } PetscFunctionReturn(PETSC_SUCCESS); } struct IJCompare { __host__ __device__ inline bool operator()(const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2) { if (t1.get<0>() < t2.get<0>()) return true; if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>(); return false; } }; struct IJEqual { __host__ __device__ inline bool operator()(const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2) { if (t1.get<0>() != t2.get<0>() || t1.get<1>() != t2.get<1>()) return false; return true; } }; struct IJDiff { __host__ __device__ inline PetscInt operator()(const PetscInt &t1, const PetscInt &t2) { return t1 == t2 ? 0 : 1; } }; struct IJSum { __host__ __device__ inline PetscInt operator()(const PetscInt &t1, const PetscInt &t2) { return t1 || t2; } }; #include <thrust/iterator/discard_iterator.h> /* Associated with MatSetPreallocationCOO_SeqAIJCUSPARSE_Basic() */ PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE_Basic(Mat A, const PetscScalar v[], InsertMode imode) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; THRUSTARRAY *cooPerm_v = NULL; thrust::device_ptr<const PetscScalar> d_v; CsrMatrix *matrix; PetscInt n; PetscFunctionBegin; PetscCheck(cusp, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUSPARSE struct"); PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUSPARSE CsrMatrix"); if (!cusp->cooPerm) { PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY)); PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY)); PetscFunctionReturn(PETSC_SUCCESS); } matrix = (CsrMatrix *)cusp->mat->mat; PetscCheck(matrix->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory"); if (!v) { if (imode == INSERT_VALUES) thrust::fill(thrust::device, matrix->values->begin(), matrix->values->end(), 0.); goto finalize; } n = cusp->cooPerm->size(); if (isCudaMem(v)) { d_v = thrust::device_pointer_cast(v); } else { cooPerm_v = new THRUSTARRAY(n); cooPerm_v->assign(v, v + n); d_v = cooPerm_v->data(); PetscCall(PetscLogCpuToGpu(n * sizeof(PetscScalar))); } PetscCall(PetscLogGpuTimeBegin()); if (imode == ADD_VALUES) { /* ADD VALUES means add to existing ones */ if (cusp->cooPerm_a) { /* there are repeated entries in d_v[], and we need to add these them */ THRUSTARRAY *cooPerm_w = new THRUSTARRAY(matrix->values->size()); auto vbit = thrust::make_permutation_iterator(d_v, cusp->cooPerm->begin()); /* thrust::reduce_by_key(keys_first,keys_last,values_first,keys_output,values_output) cooPerm_a = [0,0,1,2,3,4]. The length is n, number of nonozeros in d_v[]. cooPerm_a is ordered. d_v[i] is the cooPerm_a[i]-th unique nonzero. 
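       Here reduce_by_key sums, over each run of equal cooPerm_a keys, the corresponding permuted entries of d_v (vbit), producing one partial sum per unique nonzero in cooPerm_w; the transform that follows adds cooPerm_w into the CSR values array.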
*/ thrust::reduce_by_key(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), vbit, thrust::make_discard_iterator(), cooPerm_w->begin(), thrust::equal_to<PetscInt>(), thrust::plus<PetscScalar>()); thrust::transform(cooPerm_w->begin(), cooPerm_w->end(), matrix->values->begin(), matrix->values->begin(), thrust::plus<PetscScalar>()); delete cooPerm_w; } else { /* all nonzeros in d_v[] are unique entries */ auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v, cusp->cooPerm->begin()), matrix->values->begin())); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v, cusp->cooPerm->end()), matrix->values->end())); thrust::for_each(zibit, zieit, VecCUDAPlusEquals()); /* values[i] += d_v[cooPerm[i]] */ } } else { if (cusp->cooPerm_a) { /* repeated entries in COO, with INSERT_VALUES -> reduce */ auto vbit = thrust::make_permutation_iterator(d_v, cusp->cooPerm->begin()); thrust::reduce_by_key(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), vbit, thrust::make_discard_iterator(), matrix->values->begin(), thrust::equal_to<PetscInt>(), thrust::plus<PetscScalar>()); } else { auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v, cusp->cooPerm->begin()), matrix->values->begin())); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v, cusp->cooPerm->end()), matrix->values->end())); thrust::for_each(zibit, zieit, VecCUDAEquals()); } } PetscCall(PetscLogGpuTimeEnd()); finalize: delete cooPerm_v; A->offloadmask = PETSC_OFFLOAD_GPU; PetscCall(PetscObjectStateIncrease((PetscObject)A)); /* shorter version of MatAssemblyEnd_SeqAIJ */ PetscCall(PetscInfo(A, "Matrix size: %" PetscInt_FMT " X %" PetscInt_FMT "; storage space: 0 unneeded,%" PetscInt_FMT " used\n", A->rmap->n, A->cmap->n, a->nz)); PetscCall(PetscInfo(A, "Number of mallocs during MatSetValues() is 0\n")); PetscCall(PetscInfo(A, "Maximum nonzeros in any row is %" PetscInt_FMT "\n", a->rmax)); a->reallocs = 0; A->info.mallocs += 0; A->info.nz_unneeded = 0; A->assembled = A->was_assembled = PETSC_TRUE; A->num_ass++; PetscFunctionReturn(PETSC_SUCCESS); } PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat A, PetscBool destroy) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscFunctionBegin; PetscCheckTypeName(A, MATSEQAIJCUSPARSE); if (!cusp) PetscFunctionReturn(PETSC_SUCCESS); if (destroy) { PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose, cusp->format)); delete cusp->csr2csc_i; cusp->csr2csc_i = NULL; } A->transupdated = PETSC_FALSE; PetscFunctionReturn(PETSC_SUCCESS); } #include <thrust/binary_search.h> /* 'Basic' means it only works when coo_i[] and coo_j[] do not contain negative indices */ PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE_Basic(Mat A, PetscCount n, PetscInt coo_i[], PetscInt coo_j[]) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscInt cooPerm_n, nzr = 0; PetscFunctionBegin; PetscCall(PetscLayoutSetUp(A->rmap)); PetscCall(PetscLayoutSetUp(A->cmap)); cooPerm_n = cusp->cooPerm ? 
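  /* cooPerm caches the sorting permutation from a previous COO preallocation, so its size equals the COO count used back then; a mismatch with the new n below forces the cached arrays to be deleted and rebuilt */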
cusp->cooPerm->size() : 0; if (n != cooPerm_n) { delete cusp->cooPerm; delete cusp->cooPerm_a; cusp->cooPerm = NULL; cusp->cooPerm_a = NULL; } if (n) { thrust::device_ptr<PetscInt> d_i, d_j; PetscInt *d_raw_i, *d_raw_j; PetscBool free_raw_i = PETSC_FALSE, free_raw_j = PETSC_FALSE; PetscMemType imtype, jmtype; PetscCall(PetscGetMemType(coo_i, &imtype)); if (PetscMemTypeHost(imtype)) { PetscCallCUDA(hipMalloc(&d_raw_i, sizeof(PetscInt) * n)); PetscCallCUDA(hipMemcpy(d_raw_i, coo_i, sizeof(PetscInt) * n, hipMemcpyHostToDevice)); d_i = thrust::device_pointer_cast(d_raw_i); free_raw_i = PETSC_TRUE; PetscCall(PetscLogCpuToGpu(1. * n * sizeof(PetscInt))); } else { d_i = thrust::device_pointer_cast(coo_i); } PetscCall(PetscGetMemType(coo_j, &jmtype)); if (PetscMemTypeHost(jmtype)) { // MatSetPreallocationCOO_MPIAIJCUSPARSE_Basic() passes device coo_i[] and host coo_j[]! PetscCallCUDA(hipMalloc(&d_raw_j, sizeof(PetscInt) * n)); PetscCallCUDA(hipMemcpy(d_raw_j, coo_j, sizeof(PetscInt) * n, hipMemcpyHostToDevice)); d_j = thrust::device_pointer_cast(d_raw_j); free_raw_j = PETSC_TRUE; PetscCall(PetscLogCpuToGpu(1. * n * sizeof(PetscInt))); } else { d_j = thrust::device_pointer_cast(coo_j); } THRUSTINTARRAY ii(A->rmap->n); if (!cusp->cooPerm) cusp->cooPerm = new THRUSTINTARRAY(n); if (!cusp->cooPerm_a) cusp->cooPerm_a = new THRUSTINTARRAY(n); /* Ex. n = 6 coo_i = [3,3,1,4,1,4] coo_j = [3,2,2,5,2,6] */ auto fkey = thrust::make_zip_iterator(thrust::make_tuple(d_i, d_j)); auto ekey = thrust::make_zip_iterator(thrust::make_tuple(d_i + n, d_j + n)); PetscCall(PetscLogGpuTimeBegin()); thrust::sequence(thrust::device, cusp->cooPerm->begin(), cusp->cooPerm->end(), 0); thrust::sort_by_key(fkey, ekey, cusp->cooPerm->begin(), IJCompare()); /* sort by row, then by col */ (*cusp->cooPerm_a).assign(d_i, d_i + n); /* copy the sorted array */ THRUSTINTARRAY w(d_j, d_j + n); /* d_i = [1,1,3,3,4,4] d_j = [2,2,2,3,5,6] cooPerm = [2,4,1,0,3,5] */ auto nekey = thrust::unique(fkey, ekey, IJEqual()); /* unique (d_i, d_j) */ /* d_i = [1,3,3,4,4,x] ^ekey d_j = [2,2,3,5,6,x] ^nekye */ if (nekey == ekey) { /* all entries are unique */ delete cusp->cooPerm_a; cusp->cooPerm_a = NULL; } else { /* Stefano: I couldn't come up with a more elegant algorithm */ /* idea: any change in i or j in the (i,j) sequence implies a new nonzero */ adjacent_difference(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), cusp->cooPerm_a->begin(), IJDiff()); /* cooPerm_a: [1,1,3,3,4,4] => [1,0,1,0,1,0]*/ adjacent_difference(w.begin(), w.end(), w.begin(), IJDiff()); /* w: [2,2,2,3,5,6] => [2,0,0,1,1,1]*/ (*cusp->cooPerm_a)[0] = 0; /* clear the first entry, though accessing an entry on device implies a hipMemcpy */ w[0] = 0; thrust::transform(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), w.begin(), cusp->cooPerm_a->begin(), IJSum()); /* cooPerm_a = [0,0,1,1,1,1]*/ thrust::inclusive_scan(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), cusp->cooPerm_a->begin(), thrust::plus<PetscInt>()); /*cooPerm_a=[0,0,1,2,3,4]*/ } thrust::counting_iterator<PetscInt> search_begin(0); thrust::upper_bound(d_i, nekey.get_iterator_tuple().get<0>(), /* binary search entries of [0,1,2,3,4,5,6) in ordered array d_i = [1,3,3,4,4], supposing A->rmap->n = 6. */ search_begin, search_begin + A->rmap->n, /* return in ii[] the index of last position in d_i[] where value could be inserted without violating the ordering */ ii.begin()); /* ii = [0,1,1,3,5,5]. 
A leading 0 will be added later */ PetscCall(PetscLogGpuTimeEnd()); PetscCall(MatSeqXAIJFreeAIJ(A, &a->a, &a->j, &a->i)); a->singlemalloc = PETSC_FALSE; a->free_a = PETSC_TRUE; a->free_ij = PETSC_TRUE; PetscCall(PetscMalloc1(A->rmap->n + 1, &a->i)); a->i[0] = 0; /* a->i = [0,0,1,1,3,5,5] */ PetscCallCUDA(hipMemcpy(a->i + 1, ii.data().get(), A->rmap->n * sizeof(PetscInt), hipMemcpyDeviceToHost)); a->nz = a->maxnz = a->i[A->rmap->n]; a->rmax = 0; PetscCall(PetscMalloc1(a->nz, &a->a)); PetscCall(PetscMalloc1(a->nz, &a->j)); PetscCallCUDA(hipMemcpy(a->j, thrust::raw_pointer_cast(d_j), a->nz * sizeof(PetscInt), hipMemcpyDeviceToHost)); if (!a->ilen) PetscCall(PetscMalloc1(A->rmap->n, &a->ilen)); if (!a->imax) PetscCall(PetscMalloc1(A->rmap->n, &a->imax)); for (PetscInt i = 0; i < A->rmap->n; i++) { const PetscInt nnzr = a->i[i + 1] - a->i[i]; nzr += (PetscInt) !!(nnzr); a->ilen[i] = a->imax[i] = nnzr; a->rmax = PetscMax(a->rmax, nnzr); } a->nonzerorowcnt = nzr; A->preallocated = PETSC_TRUE; PetscCall(PetscLogGpuToCpu((A->rmap->n + a->nz) * sizeof(PetscInt))); PetscCall(MatMarkDiagonal_SeqAIJ(A)); if (free_raw_i) PetscCallCUDA(hipFree(d_raw_i)); if (free_raw_j) PetscCallCUDA(hipFree(d_raw_j)); } else { PetscCall(MatSeqAIJSetPreallocation(A, 0, NULL)); } PetscCall(MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE)); /* We want to allocate the CUSPARSE struct for matvec now. The code is so convoluted now that I prefer to copy zeros */ PetscCall(PetscArrayzero(a->a, a->nz)); PetscCall(MatCheckCompressedRow(A, nzr, &a->compressedrow, a->i, A->rmap->n, 0.6)); A->offloadmask = PETSC_OFFLOAD_CPU; PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE)); PetscFunctionReturn(PETSC_SUCCESS); } PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat mat, PetscCount coo_n, PetscInt coo_i[], PetscInt coo_j[]) { Mat_SeqAIJ *seq; Mat_SeqAIJCUSPARSE *dev; PetscBool coo_basic = PETSC_TRUE; PetscMemType mtype = PETSC_MEMTYPE_DEVICE; PetscFunctionBegin; PetscCall(MatResetPreallocationCOO_SeqAIJ(mat)); PetscCall(MatResetPreallocationCOO_SeqAIJCUSPARSE(mat)); if (coo_i) { PetscCall(PetscGetMemType(coo_i, &mtype)); if (PetscMemTypeHost(mtype)) { for (PetscCount k = 0; k < coo_n; k++) { if (coo_i[k] < 0 || coo_j[k] < 0) { coo_basic = PETSC_FALSE; break; } } } } if (coo_basic) { /* i,j are on device or do not contain negative indices */ PetscCall(MatSetPreallocationCOO_SeqAIJCUSPARSE_Basic(mat, coo_n, coo_i, coo_j)); } else { PetscCall(MatSetPreallocationCOO_SeqAIJ(mat, coo_n, coo_i, coo_j)); mat->offloadmask = PETSC_OFFLOAD_CPU; PetscCall(MatSeqAIJCUSPARSECopyToGPU(mat)); seq = static_cast<Mat_SeqAIJ *>(mat->data); dev = static_cast<Mat_SeqAIJCUSPARSE *>(mat->spptr); PetscCallCUDA(hipMalloc((void **)&dev->jmap_d, (seq->nz + 1) * sizeof(PetscCount))); PetscCallCUDA(hipMemcpy(dev->jmap_d, seq->jmap, (seq->nz + 1) * sizeof(PetscCount), hipMemcpyHostToDevice)); PetscCallCUDA(hipMalloc((void **)&dev->perm_d, seq->Atot * sizeof(PetscCount))); PetscCallCUDA(hipMemcpy(dev->perm_d, seq->perm, seq->Atot * sizeof(PetscCount), hipMemcpyHostToDevice)); dev->use_extended_coo = PETSC_TRUE; } PetscFunctionReturn(PETSC_SUCCESS); } __global__ static void MatAddCOOValues(const PetscScalar kv[], PetscCount nnz, const PetscCount jmap[], const PetscCount perm[], InsertMode imode, PetscScalar a[]) { PetscCount i = blockIdx.x * blockDim.x + threadIdx.x; const PetscCount grid_size = gridDim.x * blockDim.x; for (; i < nnz; i += grid_size) { PetscScalar sum = 0.0; for (PetscCount k = 
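       /* jmap[i]..jmap[i+1] delimits the segment of perm[] that lists, for local nonzero i, the positions of its contributions in the user-provided value array kv[]; these are summed and written (or added) into a[i] */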
jmap[i]; k < jmap[i + 1]; k++) sum += kv[perm[k]]; a[i] = (imode == INSERT_VALUES ? 0.0 : a[i]) + sum; } } PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode) { Mat_SeqAIJ *seq = (Mat_SeqAIJ *)A->data; Mat_SeqAIJCUSPARSE *dev = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscCount Annz = seq->nz; PetscMemType memtype; const PetscScalar *v1 = v; PetscScalar *Aa; PetscFunctionBegin; if (dev->use_extended_coo) { PetscCall(PetscGetMemType(v, &memtype)); if (PetscMemTypeHost(memtype)) { /* If user gave v[] in host, we might need to copy it to device if any */ PetscCallCUDA(hipMalloc((void **)&v1, seq->coo_n * sizeof(PetscScalar))); PetscCallCUDA(hipMemcpy((void *)v1, v, seq->coo_n * sizeof(PetscScalar), hipMemcpyHostToDevice)); } if (imode == INSERT_VALUES) PetscCall(MatSeqAIJCUSPARSEGetArrayWrite(A, &Aa)); else PetscCall(MatSeqAIJCUSPARSEGetArray(A, &Aa)); if (Annz) { hipLaunchKernelGGL(( MatAddCOOValues), dim3((Annz + 255) / 256), dim3(256), 0, 0, v1, Annz, dev->jmap_d, dev->perm_d, imode, Aa); PetscCallCUDA(hipPeekAtLastError()); } if (imode == INSERT_VALUES) PetscCall(MatSeqAIJCUSPARSERestoreArrayWrite(A, &Aa)); else PetscCall(MatSeqAIJCUSPARSERestoreArray(A, &Aa)); if (PetscMemTypeHost(memtype)) PetscCallCUDA(hipFree((void *)v1)); } else { PetscCall(MatSetValuesCOO_SeqAIJCUSPARSE_Basic(A, v, imode)); } PetscFunctionReturn(PETSC_SUCCESS); } /*@C MatSeqAIJCUSPARSEGetIJ - returns the device row storage `i` and `j` indices for `MATSEQAIJCUSPARSE` matrices. Not Collective Input Parameters: + A - the matrix - compressed - `PETSC_TRUE` or `PETSC_FALSE` indicating the matrix data structure should be always returned in compressed form Output Parameters: + ia - the CSR row pointers - ja - the CSR column indices Level: developer Note: When compressed is true, the CSR structure does not contain empty rows .seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSERestoreIJ()`, `MatSeqAIJCUSPARSEGetArrayRead()` @*/ PetscErrorCode MatSeqAIJCUSPARSEGetIJ(Mat A, PetscBool compressed, const int **i, const int **j) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; CsrMatrix *csr; Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); if (!i || !j) PetscFunctionReturn(PETSC_SUCCESS); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); csr = (CsrMatrix *)cusp->mat->mat; if (i) { if (!compressed && a->compressedrow.use) { /* need full row offset */ if (!cusp->rowoffsets_gpu) { cusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); cusp->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1); PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt))); } *i = cusp->rowoffsets_gpu->data().get(); } else *i = csr->row_offsets->data().get(); } if (j) *j = csr->column_indices->data().get(); PetscFunctionReturn(PETSC_SUCCESS); } /*@C MatSeqAIJCUSPARSERestoreIJ - restore the device row storage `i` and `j` indices obtained with `MatSeqAIJCUSPARSEGetIJ()` Not Collective Input Parameters: + A - the matrix . compressed - `PETSC_TRUE` or `PETSC_FALSE` indicating the matrix data structure should be always returned in compressed form . 
ia - the CSR row pointers - ja - the CSR column indices Level: developer .seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetIJ()` @*/ PetscErrorCode MatSeqAIJCUSPARSERestoreIJ(Mat A, PetscBool, const int **i, const int **j) { PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); if (i) *i = NULL; if (j) *j = NULL; PetscFunctionReturn(PETSC_SUCCESS); } /*@C MatSeqAIJCUSPARSEGetArrayRead - gives read-only access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored Not Collective Input Parameter: . A - a `MATSEQAIJCUSPARSE` matrix Output Parameter: . a - pointer to the device data Level: developer Note: May trigger host-device copies if up-to-date matrix data is on host .seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()`, `MatSeqAIJCUSPARSEGetArrayWrite()`, `MatSeqAIJCUSPARSERestoreArrayRead()` @*/ PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat A, const PetscScalar **a) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; CsrMatrix *csr; PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscValidPointer(a, 2); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); csr = (CsrMatrix *)cusp->mat->mat; PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory"); *a = csr->values->data().get(); PetscFunctionReturn(PETSC_SUCCESS); } /*@C MatSeqAIJCUSPARSERestoreArrayRead - restore the read-only access array obtained from `MatSeqAIJCUSPARSEGetArrayRead()` Not Collective Input Parameters: + A - a `MATSEQAIJCUSPARSE` matrix - a - pointer to the device data Level: developer .seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayRead()` @*/ PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat A, const PetscScalar **a) { PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscValidPointer(a, 2); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); *a = NULL; PetscFunctionReturn(PETSC_SUCCESS); } /*@C MatSeqAIJCUSPARSEGetArray - gives read-write access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored Not Collective Input Parameter: . A - a `MATSEQAIJCUSPARSE` matrix Output Parameter: . 
a - pointer to the device data Level: developer Note: May trigger host-device copies if up-to-date matrix data is on host .seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayRead()`, `MatSeqAIJCUSPARSEGetArrayWrite()`, `MatSeqAIJCUSPARSERestoreArray()` @*/ PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat A, PetscScalar **a) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; CsrMatrix *csr; PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscValidPointer(a, 2); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); csr = (CsrMatrix *)cusp->mat->mat; PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory"); *a = csr->values->data().get(); A->offloadmask = PETSC_OFFLOAD_GPU; PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE)); PetscFunctionReturn(PETSC_SUCCESS); } /*@C MatSeqAIJCUSPARSERestoreArray - restore the read-write access array obtained from `MatSeqAIJCUSPARSEGetArray()` Not Collective Input Parameters: + A - a `MATSEQAIJCUSPARSE` matrix - a - pointer to the device data Level: developer .seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()` @*/ PetscErrorCode MatSeqAIJCUSPARSERestoreArray(Mat A, PetscScalar **a) { PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscValidPointer(a, 2); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); PetscCall(MatSeqAIJInvalidateDiagonal(A)); PetscCall(PetscObjectStateIncrease((PetscObject)A)); *a = NULL; PetscFunctionReturn(PETSC_SUCCESS); } /*@C MatSeqAIJCUSPARSEGetArrayWrite - gives write access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored Not Collective Input Parameter: . A - a `MATSEQAIJCUSPARSE` matrix Output Parameter: . 
a - pointer to the device data Level: developer Note: Does not trigger host-device copies and flags data validity on the GPU .seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()`, `MatSeqAIJCUSPARSEGetArrayRead()`, `MatSeqAIJCUSPARSERestoreArrayWrite()` @*/ PetscErrorCode MatSeqAIJCUSPARSEGetArrayWrite(Mat A, PetscScalar **a) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; CsrMatrix *csr; PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscValidPointer(a, 2); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); csr = (CsrMatrix *)cusp->mat->mat; PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory"); *a = csr->values->data().get(); A->offloadmask = PETSC_OFFLOAD_GPU; PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE)); PetscFunctionReturn(PETSC_SUCCESS); } /*@C MatSeqAIJCUSPARSERestoreArrayWrite - restore the write-only access array obtained from `MatSeqAIJCUSPARSEGetArrayWrite()` Not Collective Input Parameters: + A - a `MATSEQAIJCUSPARSE` matrix - a - pointer to the device data Level: developer .seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayWrite()` @*/ PetscErrorCode MatSeqAIJCUSPARSERestoreArrayWrite(Mat A, PetscScalar **a) { PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscValidPointer(a, 2); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); PetscCall(MatSeqAIJInvalidateDiagonal(A)); PetscCall(PetscObjectStateIncrease((PetscObject)A)); *a = NULL; PetscFunctionReturn(PETSC_SUCCESS); } struct IJCompare4 { __host__ __device__ inline bool operator()(const thrust::tuple<int, int, PetscScalar, int> &t1, const thrust::tuple<int, int, PetscScalar, int> &t2) { if (t1.get<0>() < t2.get<0>()) return true; if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>(); return false; } }; struct Shift { int _shift; Shift(int shift) : _shift(shift) { } __host__ __device__ inline int operator()(const int &c) { return c + _shift; } }; /* merges two SeqAIJCUSPARSE matrices A, B by concatenating their rows. 
[A';B']' operation in matlab notation */ PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat A, Mat B, MatReuse reuse, Mat *C) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data, *b = (Mat_SeqAIJ *)B->data, *c; Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr, *Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr, *Ccusp; Mat_SeqAIJCUSPARSEMultStruct *Cmat; CsrMatrix *Acsr, *Bcsr, *Ccsr; PetscInt Annz, Bnnz; hipsparseStatus_t stat; PetscInt i, m, n, zero = 0; PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscValidHeaderSpecific(B, MAT_CLASSID, 2); PetscValidPointer(C, 4); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); PetscCheckTypeName(B, MATSEQAIJCUSPARSE); PetscCheck(A->rmap->n == B->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Invalid number or rows %" PetscInt_FMT " != %" PetscInt_FMT, A->rmap->n, B->rmap->n); PetscCheck(reuse != MAT_INPLACE_MATRIX, PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_INPLACE_MATRIX not supported"); PetscCheck(Acusp->format != MAT_CUSPARSE_ELL && Acusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); PetscCheck(Bcusp->format != MAT_CUSPARSE_ELL && Bcusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); if (reuse == MAT_INITIAL_MATRIX) { m = A->rmap->n; n = A->cmap->n + B->cmap->n; PetscCall(MatCreate(PETSC_COMM_SELF, C)); PetscCall(MatSetSizes(*C, m, n, m, n)); PetscCall(MatSetType(*C, MATSEQAIJCUSPARSE)); c = (Mat_SeqAIJ *)(*C)->data; Ccusp = (Mat_SeqAIJCUSPARSE *)(*C)->spptr; Cmat = new Mat_SeqAIJCUSPARSEMultStruct; Ccsr = new CsrMatrix; Cmat->cprowIndices = NULL; c->compressedrow.use = PETSC_FALSE; c->compressedrow.nrows = 0; c->compressedrow.i = NULL; c->compressedrow.rindex = NULL; Ccusp->workVector = NULL; Ccusp->nrows = m; Ccusp->mat = Cmat; Ccusp->mat->mat = Ccsr; Ccsr->num_rows = m; Ccsr->num_cols = n; PetscCallCUSPARSE(hipsparseCreateMatDescr(&Cmat->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(Cmat->descr, HIPSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(hipsparseSetMatType(Cmat->descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); PetscCallCUDA(hipMalloc((void **)&(Cmat->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(Cmat->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar))); PetscCallCUDA(hipMemcpy(Cmat->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(Cmat->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(B)); PetscCheck(Acusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); PetscCheck(Bcusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); Acsr = (CsrMatrix *)Acusp->mat->mat; Bcsr = (CsrMatrix *)Bcusp->mat->mat; Annz = (PetscInt)Acsr->column_indices->size(); Bnnz = (PetscInt)Bcsr->column_indices->size(); c->nz = Annz + Bnnz; Ccsr->row_offsets = new THRUSTINTARRAY32(m + 1); Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); Ccsr->values = new THRUSTARRAY(c->nz); Ccsr->num_entries = c->nz; Ccusp->cooPerm = new THRUSTINTARRAY(c->nz); if (c->nz) { auto Acoo = new THRUSTINTARRAY32(Annz); auto Bcoo = new THRUSTINTARRAY32(Bnnz); auto Ccoo = new THRUSTINTARRAY32(c->nz); THRUSTINTARRAY32 *Aroff, *Broff; if (a->compressedrow.use) { /* need full row offset */ if (!Acusp->rowoffsets_gpu) { Acusp->rowoffsets_gpu = 
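          /* the compressed-row structure omits empty rows, so Xcsr2coo below needs the full (n+1)-entry row-offset array, built from a->i once and cached on the GPU */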
new THRUSTINTARRAY32(A->rmap->n + 1); Acusp->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1); PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt))); } Aroff = Acusp->rowoffsets_gpu; } else Aroff = Acsr->row_offsets; if (b->compressedrow.use) { /* need full row offset */ if (!Bcusp->rowoffsets_gpu) { Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1); Bcusp->rowoffsets_gpu->assign(b->i, b->i + B->rmap->n + 1); PetscCall(PetscLogCpuToGpu((B->rmap->n + 1) * sizeof(PetscInt))); } Broff = Bcusp->rowoffsets_gpu; } else Broff = Bcsr->row_offsets; PetscCall(PetscLogGpuTimeBegin()); stat = hipsparseXcsr2coo(Acusp->handle, Aroff->data().get(), Annz, m, Acoo->data().get(), HIPSPARSE_INDEX_BASE_ZERO); PetscCallCUSPARSE(stat); stat = hipsparseXcsr2coo(Bcusp->handle, Broff->data().get(), Bnnz, m, Bcoo->data().get(), HIPSPARSE_INDEX_BASE_ZERO); PetscCallCUSPARSE(stat); /* Issues when using bool with large matrices on SUMMIT 10.2.89 */ auto Aperm = thrust::make_constant_iterator(1); auto Bperm = thrust::make_constant_iterator(0); #if PETSC_PKG_CUDA_VERSION_GE(10, 0, 0) auto Bcib = thrust::make_transform_iterator(Bcsr->column_indices->begin(), Shift(A->cmap->n)); auto Bcie = thrust::make_transform_iterator(Bcsr->column_indices->end(), Shift(A->cmap->n)); #else /* there are issues instantiating the merge operation using a transform iterator for the columns of B */ auto Bcib = Bcsr->column_indices->begin(); auto Bcie = Bcsr->column_indices->end(); thrust::transform(Bcib, Bcie, Bcib, Shift(A->cmap->n)); #endif auto wPerm = new THRUSTINTARRAY32(Annz + Bnnz); auto Azb = thrust::make_zip_iterator(thrust::make_tuple(Acoo->begin(), Acsr->column_indices->begin(), Acsr->values->begin(), Aperm)); auto Aze = thrust::make_zip_iterator(thrust::make_tuple(Acoo->end(), Acsr->column_indices->end(), Acsr->values->end(), Aperm)); auto Bzb = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->begin(), Bcib, Bcsr->values->begin(), Bperm)); auto Bze = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->end(), Bcie, Bcsr->values->end(), Bperm)); auto Czb = thrust::make_zip_iterator(thrust::make_tuple(Ccoo->begin(), Ccsr->column_indices->begin(), Ccsr->values->begin(), wPerm->begin())); auto p1 = Ccusp->cooPerm->begin(); auto p2 = Ccusp->cooPerm->begin(); thrust::advance(p2, Annz); PetscCallThrust(thrust::merge(thrust::device, Azb, Aze, Bzb, Bze, Czb, IJCompare4())); #if PETSC_PKG_CUDA_VERSION_LT(10, 0, 0) thrust::transform(Bcib, Bcie, Bcib, Shift(-A->cmap->n)); #endif auto cci = thrust::make_counting_iterator(zero); auto cce = thrust::make_counting_iterator(c->nz); #if 0 //Errors on SUMMIT cuda 11.1.0 PetscCallThrust(thrust::partition_copy(thrust::device,cci,cce,wPerm->begin(),p1,p2,thrust::identity<int>())); #else auto pred = thrust::identity<int>(); PetscCallThrust(thrust::copy_if(thrust::device, cci, cce, wPerm->begin(), p1, pred)); PetscCallThrust(thrust::remove_copy_if(thrust::device, cci, cce, wPerm->begin(), p2, pred)); #endif stat = hipsparseXcoo2csr(Ccusp->handle, Ccoo->data().get(), c->nz, m, Ccsr->row_offsets->data().get(), HIPSPARSE_INDEX_BASE_ZERO); PetscCallCUSPARSE(stat); PetscCall(PetscLogGpuTimeEnd()); delete wPerm; delete Acoo; delete Bcoo; delete Ccoo; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) stat = hipsparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, Ccsr->num_entries, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); 
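      /* (CUDA >= 11) the merged CSR is also registered with the generic cuSPARSE API; the SpMV/SpMM paths consume this matDescr */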
PetscCallCUSPARSE(stat); #endif if (A->form_explicit_transpose && B->form_explicit_transpose) { /* if A and B have the transpose, generate C transpose too */ PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A)); PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(B)); PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE; Mat_SeqAIJCUSPARSEMultStruct *CmatT = new Mat_SeqAIJCUSPARSEMultStruct; CsrMatrix *CcsrT = new CsrMatrix; CsrMatrix *AcsrT = AT ? (CsrMatrix *)Acusp->matTranspose->mat : NULL; CsrMatrix *BcsrT = BT ? (CsrMatrix *)Bcusp->matTranspose->mat : NULL; (*C)->form_explicit_transpose = PETSC_TRUE; (*C)->transupdated = PETSC_TRUE; Ccusp->rowoffsets_gpu = NULL; CmatT->cprowIndices = NULL; CmatT->mat = CcsrT; CcsrT->num_rows = n; CcsrT->num_cols = m; CcsrT->num_entries = c->nz; CcsrT->row_offsets = new THRUSTINTARRAY32(n + 1); CcsrT->column_indices = new THRUSTINTARRAY32(c->nz); CcsrT->values = new THRUSTARRAY(c->nz); PetscCall(PetscLogGpuTimeBegin()); auto rT = CcsrT->row_offsets->begin(); if (AT) { rT = thrust::copy(AcsrT->row_offsets->begin(), AcsrT->row_offsets->end(), rT); thrust::advance(rT, -1); } if (BT) { auto titb = thrust::make_transform_iterator(BcsrT->row_offsets->begin(), Shift(a->nz)); auto tite = thrust::make_transform_iterator(BcsrT->row_offsets->end(), Shift(a->nz)); thrust::copy(titb, tite, rT); } auto cT = CcsrT->column_indices->begin(); if (AT) cT = thrust::copy(AcsrT->column_indices->begin(), AcsrT->column_indices->end(), cT); if (BT) thrust::copy(BcsrT->column_indices->begin(), BcsrT->column_indices->end(), cT); auto vT = CcsrT->values->begin(); if (AT) vT = thrust::copy(AcsrT->values->begin(), AcsrT->values->end(), vT); if (BT) thrust::copy(BcsrT->values->begin(), BcsrT->values->end(), vT); PetscCall(PetscLogGpuTimeEnd()); PetscCallCUSPARSE(hipsparseCreateMatDescr(&CmatT->descr)); PetscCallCUSPARSE(hipsparseSetMatIndexBase(CmatT->descr, HIPSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(hipsparseSetMatType(CmatT->descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); PetscCallCUDA(hipMalloc((void **)&(CmatT->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(CmatT->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(hipMalloc((void **)&(CmatT->beta_one), sizeof(PetscScalar))); PetscCallCUDA(hipMemcpy(CmatT->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(CmatT->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), hipMemcpyHostToDevice)); PetscCallCUDA(hipMemcpy(CmatT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), hipMemcpyHostToDevice)); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) stat = hipsparseCreateCsr(&CmatT->matDescr, CcsrT->num_rows, CcsrT->num_cols, CcsrT->num_entries, CcsrT->row_offsets->data().get(), CcsrT->column_indices->data().get(), CcsrT->values->data().get(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); #endif Ccusp->matTranspose = CmatT; } } c->singlemalloc = PETSC_FALSE; c->free_a = PETSC_TRUE; c->free_ij = PETSC_TRUE; PetscCall(PetscMalloc1(m + 1, &c->i)); PetscCall(PetscMalloc1(c->nz, &c->j)); if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */ THRUSTINTARRAY ii(Ccsr->row_offsets->size()); THRUSTINTARRAY jj(Ccsr->column_indices->size()); ii = *Ccsr->row_offsets; jj = *Ccsr->column_indices; PetscCallCUDA(hipMemcpy(c->i, ii.data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), hipMemcpyDeviceToHost)); 
PetscCallCUDA(hipMemcpy(c->j, jj.data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), hipMemcpyDeviceToHost)); } else { PetscCallCUDA(hipMemcpy(c->i, Ccsr->row_offsets->data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), hipMemcpyDeviceToHost)); PetscCallCUDA(hipMemcpy(c->j, Ccsr->column_indices->data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), hipMemcpyDeviceToHost)); } PetscCall(PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size()) * sizeof(PetscInt))); PetscCall(PetscMalloc1(m, &c->ilen)); PetscCall(PetscMalloc1(m, &c->imax)); c->maxnz = c->nz; c->nonzerorowcnt = 0; c->rmax = 0; for (i = 0; i < m; i++) { const PetscInt nn = c->i[i + 1] - c->i[i]; c->ilen[i] = c->imax[i] = nn; c->nonzerorowcnt += (PetscInt) !!nn; c->rmax = PetscMax(c->rmax, nn); } PetscCall(MatMarkDiagonal_SeqAIJ(*C)); PetscCall(PetscMalloc1(c->nz, &c->a)); (*C)->nonzerostate++; PetscCall(PetscLayoutSetUp((*C)->rmap)); PetscCall(PetscLayoutSetUp((*C)->cmap)); Ccusp->nonzerostate = (*C)->nonzerostate; (*C)->preallocated = PETSC_TRUE; } else { PetscCheck((*C)->rmap->n == B->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Invalid number or rows %" PetscInt_FMT " != %" PetscInt_FMT, (*C)->rmap->n, B->rmap->n); c = (Mat_SeqAIJ *)(*C)->data; if (c->nz) { Ccusp = (Mat_SeqAIJCUSPARSE *)(*C)->spptr; PetscCheck(Ccusp->cooPerm, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cooPerm"); PetscCheck(Ccusp->format != MAT_CUSPARSE_ELL && Ccusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); PetscCheck(Ccusp->nonzerostate == (*C)->nonzerostate, PETSC_COMM_SELF, PETSC_ERR_COR, "Wrong nonzerostate"); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(B)); PetscCheck(Acusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); PetscCheck(Bcusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); Acsr = (CsrMatrix *)Acusp->mat->mat; Bcsr = (CsrMatrix *)Bcusp->mat->mat; Ccsr = (CsrMatrix *)Ccusp->mat->mat; PetscCheck(Acsr->num_entries == (PetscInt)Acsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "A nnz %" PetscInt_FMT " != %" PetscInt_FMT, Acsr->num_entries, (PetscInt)Acsr->values->size()); PetscCheck(Bcsr->num_entries == (PetscInt)Bcsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "B nnz %" PetscInt_FMT " != %" PetscInt_FMT, Bcsr->num_entries, (PetscInt)Bcsr->values->size()); PetscCheck(Ccsr->num_entries == (PetscInt)Ccsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "C nnz %" PetscInt_FMT " != %" PetscInt_FMT, Ccsr->num_entries, (PetscInt)Ccsr->values->size()); PetscCheck(Ccsr->num_entries == Acsr->num_entries + Bcsr->num_entries, PETSC_COMM_SELF, PETSC_ERR_COR, "C nnz %" PetscInt_FMT " != %" PetscInt_FMT " + %" PetscInt_FMT, Ccsr->num_entries, Acsr->num_entries, Bcsr->num_entries); PetscCheck(Ccusp->cooPerm->size() == Ccsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "permSize %" PetscInt_FMT " != %" PetscInt_FMT, (PetscInt)Ccusp->cooPerm->size(), (PetscInt)Ccsr->values->size()); auto pmid = Ccusp->cooPerm->begin(); thrust::advance(pmid, Acsr->num_entries); PetscCall(PetscLogGpuTimeBegin()); auto zibait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(), Ccusp->cooPerm->begin()))); auto zieait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(), pmid))); thrust::for_each(zibait, zieait, VecCUDAEquals()); auto zibbit = 
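        /* numeric-only reuse: cooPerm recorded during the symbolic (MAT_INITIAL_MATRIX) phase where each of A's entries (first Acsr->num_entries positions) and B's entries (the rest) landed in C, so updating C is a plain scatter of the two value arrays */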
thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(), pmid))); auto ziebit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(), Ccusp->cooPerm->end()))); thrust::for_each(zibbit, ziebit, VecCUDAEquals()); PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(*C, PETSC_FALSE)); if (A->form_explicit_transpose && B->form_explicit_transpose && (*C)->form_explicit_transpose) { PetscCheck(Ccusp->matTranspose, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing transpose Mat_SeqAIJCUSPARSEMultStruct"); PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE; CsrMatrix *AcsrT = AT ? (CsrMatrix *)Acusp->matTranspose->mat : NULL; CsrMatrix *BcsrT = BT ? (CsrMatrix *)Bcusp->matTranspose->mat : NULL; CsrMatrix *CcsrT = (CsrMatrix *)Ccusp->matTranspose->mat; auto vT = CcsrT->values->begin(); if (AT) vT = thrust::copy(AcsrT->values->begin(), AcsrT->values->end(), vT); if (BT) thrust::copy(BcsrT->values->begin(), BcsrT->values->end(), vT); (*C)->transupdated = PETSC_TRUE; } PetscCall(PetscLogGpuTimeEnd()); } } PetscCall(PetscObjectStateIncrease((PetscObject)*C)); (*C)->assembled = PETSC_TRUE; (*C)->was_assembled = PETSC_FALSE; (*C)->offloadmask = PETSC_OFFLOAD_GPU; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[]) { bool dmem; const PetscScalar *av; PetscFunctionBegin; dmem = isCudaMem(v); PetscCall(MatSeqAIJCUSPARSEGetArrayRead(A, &av)); if (n && idx) { THRUSTINTARRAY widx(n); widx.assign(idx, idx + n); PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt))); THRUSTARRAY *w = NULL; thrust::device_ptr<PetscScalar> dv; if (dmem) { dv = thrust::device_pointer_cast(v); } else { w = new THRUSTARRAY(n); dv = w->data(); } thrust::device_ptr<const PetscScalar> dav = thrust::device_pointer_cast(av); auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav, widx.begin()), dv)); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav, widx.end()), dv + n)); thrust::for_each(zibit, zieit, VecCUDAEquals()); if (w) PetscCallCUDA(hipMemcpy(v, w->data().get(), n * sizeof(PetscScalar), hipMemcpyDeviceToHost)); delete w; } else { PetscCallCUDA(hipMemcpy(v, av, n * sizeof(PetscScalar), dmem ? hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost)); } if (!dmem) PetscCall(PetscLogCpuToGpu(n * sizeof(PetscScalar))); PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(A, &av)); PetscFunctionReturn(PETSC_SUCCESS); }
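/*
   Usage sketch: a minimal example of driving the COO assembly path implemented above
   through the public interface. The matrix size and the index/value data are made up
   for illustration only.

     Mat         A;
     PetscInt    coo_i[] = {0, 0, 1, 1};              // row of each COO entry
     PetscInt    coo_j[] = {0, 1, 1, 1};              // column of each COO entry (repeats allowed)
     PetscScalar v[]     = {1.0, 2.0, 3.0, 4.0};

     PetscCall(MatCreate(PETSC_COMM_SELF, &A));
     PetscCall(MatSetSizes(A, 2, 2, 2, 2));
     PetscCall(MatSetType(A, MATSEQAIJCUSPARSE));
     PetscCall(MatSetPreallocationCOO(A, 4, coo_i, coo_j)); // dispatches to MatSetPreallocationCOO_SeqAIJCUSPARSE()
     PetscCall(MatSetValuesCOO(A, v, ADD_VALUES));          // dispatches to MatSetValuesCOO_SeqAIJCUSPARSE()
     PetscCall(MatDestroy(&A));

   The two entries at (1,1) are reduced into a single nonzero on the device (the
   reduce_by_key / jmap handling above); ADD_VALUES then adds the result to whatever
   is already stored, while INSERT_VALUES overwrites it. coo_i/coo_j and v may live in
   host or device memory; the routines above check PetscGetMemType()/isCudaMem().
*/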
63c2d84cbb62476332dd0c60be432d4108eeda95.cu
/* Defines the basic matrix operations for the AIJ (compressed row) matrix storage format using the CUSPARSE library, */ #define PETSC_SKIP_IMMINTRIN_H_CUDAWORKAROUND 1 #include <petscconf.h> #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/ #include <../src/mat/impls/sbaij/seq/sbaij.h> #include <../src/vec/vec/impls/dvecimpl.h> #include <petsc/private/vecimpl.h> #undef VecType #include <../src/mat/impls/aij/seq/seqcusparse/cusparsematimpl.h> #include <thrust/adjacent_difference.h> #if PETSC_CPP_VERSION >= 14 #define PETSC_HAVE_THRUST_ASYNC 1 // thrust::for_each(thrust::cuda::par.on()) requires C++14 #include <thrust/async/for_each.h> #endif #include <thrust/iterator/constant_iterator.h> #include <thrust/remove.h> #include <thrust/sort.h> #include <thrust/unique.h> const char *const MatCUSPARSEStorageFormats[] = {"CSR", "ELL", "HYB", "MatCUSPARSEStorageFormat", "MAT_CUSPARSE_", 0}; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) /* The following are copied from cusparse.h in CUDA-11.0. In MatCUSPARSESpMVAlgorithms[] etc, we copy them in 0-based integer value order, since we want to use PetscOptionsEnum() to parse user command line options for them. typedef enum { CUSPARSE_MV_ALG_DEFAULT = 0, CUSPARSE_COOMV_ALG = 1, CUSPARSE_CSRMV_ALG1 = 2, CUSPARSE_CSRMV_ALG2 = 3 } cusparseSpMVAlg_t; typedef enum { CUSPARSE_MM_ALG_DEFAULT CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_ALG_DEFAULT) = 0, CUSPARSE_COOMM_ALG1 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG1) = 1, CUSPARSE_COOMM_ALG2 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG2) = 2, CUSPARSE_COOMM_ALG3 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_COO_ALG3) = 3, CUSPARSE_CSRMM_ALG1 CUSPARSE_DEPRECATED_ENUM(CUSPARSE_SPMM_CSR_ALG1) = 4, CUSPARSE_SPMM_ALG_DEFAULT = 0, CUSPARSE_SPMM_COO_ALG1 = 1, CUSPARSE_SPMM_COO_ALG2 = 2, CUSPARSE_SPMM_COO_ALG3 = 3, CUSPARSE_SPMM_COO_ALG4 = 5, CUSPARSE_SPMM_CSR_ALG1 = 4, CUSPARSE_SPMM_CSR_ALG2 = 6, } cusparseSpMMAlg_t; typedef enum { CUSPARSE_CSR2CSC_ALG1 = 1, // faster than V2 (in general), deterministic CUSPARSE_CSR2CSC_ALG2 = 2 // low memory requirement, non-deterministic } cusparseCsr2CscAlg_t; */ const char *const MatCUSPARSESpMVAlgorithms[] = {"MV_ALG_DEFAULT", "COOMV_ALG", "CSRMV_ALG1", "CSRMV_ALG2", "cusparseSpMVAlg_t", "CUSPARSE_", 0}; const char *const MatCUSPARSESpMMAlgorithms[] = {"ALG_DEFAULT", "COO_ALG1", "COO_ALG2", "COO_ALG3", "CSR_ALG1", "COO_ALG4", "CSR_ALG2", "cusparseSpMMAlg_t", "CUSPARSE_SPMM_", 0}; const char *const MatCUSPARSECsr2CscAlgorithms[] = {"INVALID" /*cusparse does not have enum 0! 
We created one*/, "ALG1", "ALG2", "cusparseCsr2CscAlg_t", "CUSPARSE_CSR2CSC_", 0}; #endif static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, const MatFactorInfo *); static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, const MatFactorInfo *); static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat, Mat, const MatFactorInfo *); static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, IS, const MatFactorInfo *); static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat, Mat, IS, IS, const MatFactorInfo *); static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat, Mat, const MatFactorInfo *); static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat, Vec, Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat, Vec, Vec); static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(Mat, PetscOptionItems *PetscOptionsObject); static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat, PetscScalar, Mat, MatStructure); static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat, PetscScalar); static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec); static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec); static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat, Vec, Vec); static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec); static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat, Vec, Vec, Vec, PetscBool, PetscBool); static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **); static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **); static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **, MatCUSPARSEStorageFormat); static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors **); static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **); static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat); static PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat, PetscBool); static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat, PetscInt, const PetscInt[], PetscScalar[]); static PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat, PetscCount, PetscInt[], PetscInt[]); static PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat, const PetscScalar[], InsertMode); PETSC_INTERN PetscErrorCode MatCUSPARSESetFormat_SeqAIJCUSPARSE(Mat A, MatCUSPARSEFormatOperation op, MatCUSPARSEStorageFormat format) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscFunctionBegin; switch (op) { case MAT_CUSPARSE_MULT: cusparsestruct->format = format; break; case MAT_CUSPARSE_ALL: cusparsestruct->format = format; break; default: SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "unsupported operation %d for MatCUSPARSEFormatOperation. MAT_CUSPARSE_MULT and MAT_CUSPARSE_ALL are currently supported.", op); } PetscFunctionReturn(PETSC_SUCCESS); } /*@ MatCUSPARSESetFormat - Sets the storage format of `MATSEQCUSPARSE` matrices for a particular operation. Only the `MatMult()` operation can use different GPU storage formats Not Collective Input Parameters: + A - Matrix of type `MATSEQAIJCUSPARSE` . op - `MatCUSPARSEFormatOperation`. 
`MATSEQAIJCUSPARSE` matrices support `MAT_CUSPARSE_MULT` and `MAT_CUSPARSE_ALL`. `MATMPIAIJCUSPARSE` matrices support `MAT_CUSPARSE_MULT_DIAG`,`MAT_CUSPARSE_MULT_OFFDIAG`, and `MAT_CUSPARSE_ALL`. - format - `MatCUSPARSEStorageFormat` (one of `MAT_CUSPARSE_CSR`, `MAT_CUSPARSE_ELL`, `MAT_CUSPARSE_HYB`.) Level: intermediate .seealso: [](chapter_matrices), `Mat`, `Mat`, `MATSEQAIJCUSPARSE`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation` @*/ PetscErrorCode MatCUSPARSESetFormat(Mat A, MatCUSPARSEFormatOperation op, MatCUSPARSEStorageFormat format) { PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscTryMethod(A, "MatCUSPARSESetFormat_C", (Mat, MatCUSPARSEFormatOperation, MatCUSPARSEStorageFormat), (A, op, format)); PetscFunctionReturn(PETSC_SUCCESS); } PETSC_INTERN PetscErrorCode MatCUSPARSESetUseCPUSolve_SeqAIJCUSPARSE(Mat A, PetscBool use_cpu) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscFunctionBegin; cusparsestruct->use_cpu_solve = use_cpu; PetscFunctionReturn(PETSC_SUCCESS); } /*@ MatCUSPARSESetUseCPUSolve - Sets to use CPU `MatSolve()`. Input Parameters: + A - Matrix of type `MATSEQAIJCUSPARSE` - use_cpu - set flag for using the built-in CPU `MatSolve()` Level: intermediate Note: The cuSparse LU solver currently computes the factors with the built-in CPU method and moves the factors to the GPU for the solve. We have observed better performance keeping the data on the CPU and computing the solve there. This method to specify if the solve is done on the CPU or GPU (GPU is the default). .seealso: [](chapter_matrices), `Mat`, `MatSolve()`, `MATSEQAIJCUSPARSE`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation` @*/ PetscErrorCode MatCUSPARSESetUseCPUSolve(Mat A, PetscBool use_cpu) { PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscTryMethod(A, "MatCUSPARSESetUseCPUSolve_C", (Mat, PetscBool), (A, use_cpu)); PetscFunctionReturn(PETSC_SUCCESS); } PetscErrorCode MatSetOption_SeqAIJCUSPARSE(Mat A, MatOption op, PetscBool flg) { PetscFunctionBegin; switch (op) { case MAT_FORM_EXPLICIT_TRANSPOSE: /* need to destroy the transpose matrix if present to prevent from logic errors if flg is set to true later */ if (A->form_explicit_transpose && !flg) PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE)); A->form_explicit_transpose = flg; break; default: PetscCall(MatSetOption_SeqAIJ(A, op, flg)); break; } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A); static PetscErrorCode MatLUFactorNumeric_SeqAIJCUSPARSE(Mat B, Mat A, const MatFactorInfo *info) { Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data; IS isrow = b->row, iscol = b->col; PetscBool row_identity, col_identity; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)B->spptr; PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A)); PetscCall(MatLUFactorNumeric_SeqAIJ(B, A, info)); B->offloadmask = PETSC_OFFLOAD_CPU; /* determine which version of MatSolve needs to be used. 
*/ PetscCall(ISIdentity(isrow, &row_identity)); PetscCall(ISIdentity(iscol, &col_identity)); if (!cusparsestruct->use_cpu_solve) { if (row_identity && col_identity) { B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; } else { B->ops->solve = MatSolve_SeqAIJCUSPARSE; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; } } B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; /* get the triangular factors */ if (!cusparsestruct->use_cpu_solve) PetscCall(MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(B)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSetFromOptions_SeqAIJCUSPARSE(Mat A, PetscOptionItems *PetscOptionsObject) { MatCUSPARSEStorageFormat format; PetscBool flg; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscFunctionBegin; PetscOptionsHeadBegin(PetscOptionsObject, "SeqAIJCUSPARSE options"); if (A->factortype == MAT_FACTOR_NONE) { PetscCall(PetscOptionsEnum("-mat_cusparse_mult_storage_format", "sets storage format of (seq)aijcusparse gpu matrices for SpMV", "MatCUSPARSESetFormat", MatCUSPARSEStorageFormats, (PetscEnum)cusparsestruct->format, (PetscEnum *)&format, &flg)); if (flg) PetscCall(MatCUSPARSESetFormat(A, MAT_CUSPARSE_MULT, format)); PetscCall(PetscOptionsEnum("-mat_cusparse_storage_format", "sets storage format of (seq)aijcusparse gpu matrices for SpMV and TriSolve", "MatCUSPARSESetFormat", MatCUSPARSEStorageFormats, (PetscEnum)cusparsestruct->format, (PetscEnum *)&format, &flg)); if (flg) PetscCall(MatCUSPARSESetFormat(A, MAT_CUSPARSE_ALL, format)); PetscCall(PetscOptionsBool("-mat_cusparse_use_cpu_solve", "Use CPU (I)LU solve", "MatCUSPARSESetUseCPUSolve", cusparsestruct->use_cpu_solve, &cusparsestruct->use_cpu_solve, &flg)); if (flg) PetscCall(MatCUSPARSESetUseCPUSolve(A, cusparsestruct->use_cpu_solve)); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCall(PetscOptionsEnum("-mat_cusparse_spmv_alg", "sets cuSPARSE algorithm used in sparse-mat dense-vector multiplication (SpMV)", "cusparseSpMVAlg_t", MatCUSPARSESpMVAlgorithms, (PetscEnum)cusparsestruct->spmvAlg, (PetscEnum *)&cusparsestruct->spmvAlg, &flg)); /* If user did use this option, check its consistency with cuSPARSE, since PetscOptionsEnum() sets enum values based on their position in MatCUSPARSESpMVAlgorithms[] */ #if CUSPARSE_VERSION > 11301 PetscCheck(!flg || CUSPARSE_SPMV_CSR_ALG1 == 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum cusparseSpMVAlg_t has been changed but PETSc has not been updated accordingly"); #else PetscCheck(!flg || CUSPARSE_CSRMV_ALG1 == 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum cusparseSpMVAlg_t has been changed but PETSc has not been updated accordingly"); #endif PetscCall(PetscOptionsEnum("-mat_cusparse_spmm_alg", "sets cuSPARSE algorithm used in sparse-mat dense-mat multiplication (SpMM)", "cusparseSpMMAlg_t", MatCUSPARSESpMMAlgorithms, (PetscEnum)cusparsestruct->spmmAlg, (PetscEnum *)&cusparsestruct->spmmAlg, &flg)); PetscCheck(!flg || CUSPARSE_SPMM_CSR_ALG1 == 4, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE enum cusparseSpMMAlg_t has been changed but PETSc has not been updated accordingly"); PetscCall( PetscOptionsEnum("-mat_cusparse_csr2csc_alg", "sets cuSPARSE algorithm used in converting CSR matrices to CSC matrices", "cusparseCsr2CscAlg_t", MatCUSPARSECsr2CscAlgorithms, (PetscEnum)cusparsestruct->csr2cscAlg, (PetscEnum *)&cusparsestruct->csr2cscAlg, &flg)); PetscCheck(!flg || CUSPARSE_CSR2CSC_ALG1 == 1, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE 
enum cusparseCsr2CscAlg_t has been changed but PETSc has not been updated accordingly"); #endif } PetscOptionsHeadEnd(); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEBuildILULowerTriMatrix(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscInt n = A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr; const PetscInt *ai = a->i, *aj = a->j, *vi; const MatScalar *aa = a->a, *v; PetscInt *AiLo, *AjLo; PetscInt i, nz, nzLower, offset, rowOffset; PetscFunctionBegin; if (!n) PetscFunctionReturn(PETSC_SUCCESS); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { try { /* first figure out the number of nonzeros in the lower triangular matrix including 1's on the diagonal. */ nzLower = n + ai[n] - ai[1]; if (!loTriFactor) { PetscScalar *AALo; PetscCallCUDA(cudaMallocHost((void **)&AALo, nzLower * sizeof(PetscScalar))); /* Allocate Space for the lower triangular matrix */ PetscCallCUDA(cudaMallocHost((void **)&AiLo, (n + 1) * sizeof(PetscInt))); PetscCallCUDA(cudaMallocHost((void **)&AjLo, nzLower * sizeof(PetscInt))); /* Fill the lower triangular matrix */ AiLo[0] = (PetscInt)0; AiLo[n] = nzLower; AjLo[0] = (PetscInt)0; AALo[0] = (MatScalar)1.0; v = aa; vi = aj; offset = 1; rowOffset = 1; for (i = 1; i < n; i++) { nz = ai[i + 1] - ai[i]; /* additional 1 for the term on the diagonal */ AiLo[i] = rowOffset; rowOffset += nz + 1; PetscCall(PetscArraycpy(&(AjLo[offset]), vi, nz)); PetscCall(PetscArraycpy(&(AALo[offset]), v, nz)); offset += nz; AjLo[offset] = (PetscInt)i; AALo[offset] = (MatScalar)1.0; offset += 1; v += nz; vi += nz; } /* allocate space for the triangular factor information */ PetscCall(PetscNew(&loTriFactor)); loTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ PetscCallCUSPARSE(cusparseCreateMatDescr(&loTriFactor->descr)); PetscCallCUSPARSE(cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL)); #else PetscCallCUSPARSE(cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR)); #endif PetscCallCUSPARSE(cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_LOWER)); PetscCallCUSPARSE(cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT)); /* set the operation */ loTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ loTriFactor->csrMat = new CsrMatrix; loTriFactor->csrMat->num_rows = n; loTriFactor->csrMat->num_cols = n; loTriFactor->csrMat->num_entries = nzLower; loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n + 1); loTriFactor->csrMat->row_offsets->assign(AiLo, AiLo + n + 1); loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzLower); loTriFactor->csrMat->column_indices->assign(AjLo, AjLo + nzLower); loTriFactor->csrMat->values = new THRUSTARRAY(nzLower); loTriFactor->csrMat->values->assign(AALo, AALo + nzLower); /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactor->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, 
loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize)); PetscCallCUDA(cudaMalloc(&loTriFactor->solveBuffer, loTriFactor->solveBufferSize)); #endif /* perform the solve analysis */ PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, loTriFactor->solvePolicy, loTriFactor->solveBuffer)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtr = loTriFactor; loTriFactor->AA_h = AALo; PetscCallCUDA(cudaFreeHost(AiLo)); PetscCallCUDA(cudaFreeHost(AjLo)); PetscCall(PetscLogCpuToGpu((n + 1 + nzLower) * sizeof(int) + nzLower * sizeof(PetscScalar))); } else { /* update values only */ if (!loTriFactor->AA_h) PetscCallCUDA(cudaMallocHost((void **)&loTriFactor->AA_h, nzLower * sizeof(PetscScalar))); /* Fill the lower triangular matrix */ loTriFactor->AA_h[0] = 1.0; v = aa; vi = aj; offset = 1; for (i = 1; i < n; i++) { nz = ai[i + 1] - ai[i]; PetscCall(PetscArraycpy(&(loTriFactor->AA_h[offset]), v, nz)); offset += nz; loTriFactor->AA_h[offset] = 1.0; offset += 1; v += nz; } loTriFactor->csrMat->values->assign(loTriFactor->AA_h, loTriFactor->AA_h + nzLower); PetscCall(PetscLogCpuToGpu(nzLower * sizeof(PetscScalar))); } } catch (char *ex) { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex); } } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscInt n = A->rmap->n; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr; const PetscInt *aj = a->j, *adiag = a->diag, *vi; const MatScalar *aa = a->a, *v; PetscInt *AiUp, *AjUp; PetscInt i, nz, nzUpper, offset; PetscFunctionBegin; if (!n) PetscFunctionReturn(PETSC_SUCCESS); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { try { /* next, figure out the number of nonzeros in the upper triangular matrix. */ nzUpper = adiag[0] - adiag[n]; if (!upTriFactor) { PetscScalar *AAUp; PetscCallCUDA(cudaMallocHost((void **)&AAUp, nzUpper * sizeof(PetscScalar))); /* Allocate Space for the upper triangular matrix */ PetscCallCUDA(cudaMallocHost((void **)&AiUp, (n + 1) * sizeof(PetscInt))); PetscCallCUDA(cudaMallocHost((void **)&AjUp, nzUpper * sizeof(PetscInt))); /* Fill the upper triangular matrix */ AiUp[0] = (PetscInt)0; AiUp[n] = nzUpper; offset = nzUpper; for (i = n - 1; i >= 0; i--) { v = aa + adiag[i + 1] + 1; vi = aj + adiag[i + 1] + 1; /* number of elements NOT on the diagonal */ nz = adiag[i] - adiag[i + 1] - 1; /* decrement the offset */ offset -= (nz + 1); /* first, set the diagonal elements */ AjUp[offset] = (PetscInt)i; AAUp[offset] = (MatScalar)1. 
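          /* v[nz] is the factor's stored diagonal a->a[adiag[i]]; PETSc's CPU (I)LU keeps U's diagonal in inverted form, so 1./v[nz] recovers the actual pivot used by the CUSPARSE_DIAG_TYPE_NON_UNIT triangular solve configured below */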
/ v[nz]; AiUp[i] = AiUp[i + 1] - (nz + 1); PetscCall(PetscArraycpy(&(AjUp[offset + 1]), vi, nz)); PetscCall(PetscArraycpy(&(AAUp[offset + 1]), v, nz)); } /* allocate space for the triangular factor information */ PetscCall(PetscNew(&upTriFactor)); upTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ PetscCallCUSPARSE(cusparseCreateMatDescr(&upTriFactor->descr)); PetscCallCUSPARSE(cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL)); #else PetscCallCUSPARSE(cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR)); #endif PetscCallCUSPARSE(cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER)); PetscCallCUSPARSE(cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT)); /* set the operation */ upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* set the matrix */ upTriFactor->csrMat = new CsrMatrix; upTriFactor->csrMat->num_rows = n; upTriFactor->csrMat->num_cols = n; upTriFactor->csrMat->num_entries = nzUpper; upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(n + 1); upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + n + 1); upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(nzUpper); upTriFactor->csrMat->column_indices->assign(AjUp, AjUp + nzUpper); upTriFactor->csrMat->values = new THRUSTARRAY(nzUpper); upTriFactor->csrMat->values->assign(AAUp, AAUp + nzUpper); /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactor->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize)); PetscCallCUDA(cudaMalloc(&upTriFactor->solveBuffer, upTriFactor->solveBufferSize)); #endif /* perform the solve analysis */ PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, upTriFactor->solvePolicy, upTriFactor->solveBuffer)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtr = upTriFactor; upTriFactor->AA_h = AAUp; PetscCallCUDA(cudaFreeHost(AiUp)); PetscCallCUDA(cudaFreeHost(AjUp)); PetscCall(PetscLogCpuToGpu((n + 1 + nzUpper) * sizeof(int) + nzUpper * sizeof(PetscScalar))); } else { if (!upTriFactor->AA_h) PetscCallCUDA(cudaMallocHost((void **)&upTriFactor->AA_h, nzUpper * sizeof(PetscScalar))); /* Fill the upper triangular matrix */ offset = nzUpper; for (i = n - 1; i >= 0; i--) { v = aa + adiag[i + 1] + 1; /* number of elements NOT on the diagonal */ nz = adiag[i] - adiag[i + 1] - 1; /* decrement the offset */ offset -= (nz + 1); /* first, set the diagonal elements */ upTriFactor->AA_h[offset] = 1. 
/ v[nz]; PetscCall(PetscArraycpy(&(upTriFactor->AA_h[offset + 1]), v, nz)); } upTriFactor->csrMat->values->assign(upTriFactor->AA_h, upTriFactor->AA_h + nzUpper); PetscCall(PetscLogCpuToGpu(nzUpper * sizeof(PetscScalar))); } } catch (char *ex) { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex); } } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEILUAnalysisAndCopyToGPU(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; IS isrow = a->row, iscol = a->icol; PetscBool row_identity, col_identity; PetscInt n = A->rmap->n; PetscFunctionBegin; PetscCheck(cusparseTriFactors, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors"); PetscCall(MatSeqAIJCUSPARSEBuildILULowerTriMatrix(A)); PetscCall(MatSeqAIJCUSPARSEBuildILUUpperTriMatrix(A)); if (!cusparseTriFactors->workVector) cusparseTriFactors->workVector = new THRUSTARRAY(n); cusparseTriFactors->nnz = a->nz; A->offloadmask = PETSC_OFFLOAD_BOTH; /* lower triangular indices */ PetscCall(ISIdentity(isrow, &row_identity)); if (!row_identity && !cusparseTriFactors->rpermIndices) { const PetscInt *r; PetscCall(ISGetIndices(isrow, &r)); cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->rpermIndices->assign(r, r + n); PetscCall(ISRestoreIndices(isrow, &r)); PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt))); } /* upper triangular indices */ PetscCall(ISIdentity(iscol, &col_identity)); if (!col_identity && !cusparseTriFactors->cpermIndices) { const PetscInt *c; PetscCall(ISGetIndices(iscol, &c)); cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->cpermIndices->assign(c, c + n); PetscCall(ISRestoreIndices(iscol, &c)); PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt))); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEBuildICCTriMatrices(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr; PetscInt *AiUp, *AjUp; PetscScalar *AAUp; PetscScalar *AALo; PetscInt nzUpper = a->nz, n = A->rmap->n, i, offset, nz, j; Mat_SeqSBAIJ *b = (Mat_SeqSBAIJ *)A->data; const PetscInt *ai = b->i, *aj = b->j, *vj; const MatScalar *aa = b->a, *v; PetscFunctionBegin; if (!n) PetscFunctionReturn(PETSC_SUCCESS); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { try { PetscCallCUDA(cudaMallocHost((void **)&AAUp, nzUpper * sizeof(PetscScalar))); PetscCallCUDA(cudaMallocHost((void **)&AALo, nzUpper * sizeof(PetscScalar))); if (!upTriFactor && !loTriFactor) { /* Allocate Space for the upper triangular matrix */ PetscCallCUDA(cudaMallocHost((void **)&AiUp, (n + 1) * sizeof(PetscInt))); PetscCallCUDA(cudaMallocHost((void **)&AjUp, nzUpper * sizeof(PetscInt))); /* Fill the upper triangular matrix */ AiUp[0] = (PetscInt)0; AiUp[n] = nzUpper; offset = 0; for (i = 0; i < n; i++) { /* set the pointers */ v = aa + ai[i]; vj = aj + ai[i]; nz = ai[i + 1] - ai[i] - 1; /* exclude diag[i] */ /* first, set the diagonal elements */ AjUp[offset] = (PetscInt)i; AAUp[offset] = (MatScalar)1.0 / v[nz]; AiUp[i] = offset; AALo[offset] = (MatScalar)1.0 / v[nz]; offset += 1; if (nz > 0) { 
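          /* The copies and the sign/scale fix-up just below fill both value arrays at once:
             the nz off-diagonal entries of row i are copied verbatim, then negated for the
             upper-factor values (AAUp) and additionally divided by the stored diagonal v[nz]
             for the values used by the transposed lower solve (AALo). Only the sign and the
             1/v[nz] scaling differ; both factors share the pattern AiUp/AjUp built here. */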
PetscCall(PetscArraycpy(&(AjUp[offset]), vj, nz)); PetscCall(PetscArraycpy(&(AAUp[offset]), v, nz)); for (j = offset; j < offset + nz; j++) { AAUp[j] = -AAUp[j]; AALo[j] = AAUp[j] / v[nz]; } offset += nz; } } /* allocate space for the triangular factor information */ PetscCall(PetscNew(&upTriFactor)); upTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ PetscCallCUSPARSE(cusparseCreateMatDescr(&upTriFactor->descr)); PetscCallCUSPARSE(cusparseSetMatIndexBase(upTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL)); #else PetscCallCUSPARSE(cusparseSetMatType(upTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR)); #endif PetscCallCUSPARSE(cusparseSetMatFillMode(upTriFactor->descr, CUSPARSE_FILL_MODE_UPPER)); PetscCallCUSPARSE(cusparseSetMatDiagType(upTriFactor->descr, CUSPARSE_DIAG_TYPE_UNIT)); /* set the matrix */ upTriFactor->csrMat = new CsrMatrix; upTriFactor->csrMat->num_rows = A->rmap->n; upTriFactor->csrMat->num_cols = A->cmap->n; upTriFactor->csrMat->num_entries = a->nz; upTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1); upTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + A->rmap->n + 1); upTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); upTriFactor->csrMat->column_indices->assign(AjUp, AjUp + a->nz); upTriFactor->csrMat->values = new THRUSTARRAY(a->nz); upTriFactor->csrMat->values->assign(AAUp, AAUp + a->nz); /* set the operation */ upTriFactor->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactor->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, &upTriFactor->solveBufferSize)); PetscCallCUDA(cudaMalloc(&upTriFactor->solveBuffer, upTriFactor->solveBufferSize)); #endif /* perform the solve analysis */ PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, upTriFactor->solvePolicy, upTriFactor->solveBuffer)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtr = upTriFactor; /* allocate space for the triangular factor information */ PetscCall(PetscNew(&loTriFactor)); loTriFactor->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL; /* Create the matrix description */ PetscCallCUSPARSE(cusparseCreateMatDescr(&loTriFactor->descr)); PetscCallCUSPARSE(cusparseSetMatIndexBase(loTriFactor->descr, CUSPARSE_INDEX_BASE_ZERO)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_GENERAL)); #else PetscCallCUSPARSE(cusparseSetMatType(loTriFactor->descr, CUSPARSE_MATRIX_TYPE_TRIANGULAR)); #endif 
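        /* Note: the "lower" factor of the ICC factorization is not stored separately. It
           reuses the upper-triangular pattern (AiUp/AjUp) with the rescaled values AALo and
           is applied with CUSPARSE_OPERATION_TRANSPOSE, so cusparse effectively solves with
           the transpose of an upper-triangular matrix. That is why the fill mode set below is
           (intentionally) CUSPARSE_FILL_MODE_UPPER with a non-unit diagonal. */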
PetscCallCUSPARSE(cusparseSetMatFillMode(loTriFactor->descr, CUSPARSE_FILL_MODE_UPPER)); PetscCallCUSPARSE(cusparseSetMatDiagType(loTriFactor->descr, CUSPARSE_DIAG_TYPE_NON_UNIT)); /* set the operation */ loTriFactor->solveOp = CUSPARSE_OPERATION_TRANSPOSE; /* set the matrix */ loTriFactor->csrMat = new CsrMatrix; loTriFactor->csrMat->num_rows = A->rmap->n; loTriFactor->csrMat->num_cols = A->cmap->n; loTriFactor->csrMat->num_entries = a->nz; loTriFactor->csrMat->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1); loTriFactor->csrMat->row_offsets->assign(AiUp, AiUp + A->rmap->n + 1); loTriFactor->csrMat->column_indices = new THRUSTINTARRAY32(a->nz); loTriFactor->csrMat->column_indices->assign(AjUp, AjUp + a->nz); loTriFactor->csrMat->values = new THRUSTARRAY(a->nz); loTriFactor->csrMat->values->assign(AALo, AALo + a->nz); /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactor->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, &loTriFactor->solveBufferSize)); PetscCallCUDA(cudaMalloc(&loTriFactor->solveBuffer, loTriFactor->solveBufferSize)); #endif /* perform the solve analysis */ PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, loTriFactor->solvePolicy, loTriFactor->solveBuffer)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtr = loTriFactor; PetscCall(PetscLogCpuToGpu(2 * (((A->rmap->n + 1) + (a->nz)) * sizeof(int) + (a->nz) * sizeof(PetscScalar)))); PetscCallCUDA(cudaFreeHost(AiUp)); PetscCallCUDA(cudaFreeHost(AjUp)); } else { /* Fill the upper triangular matrix */ offset = 0; for (i = 0; i < n; i++) { /* set the pointers */ v = aa + ai[i]; nz = ai[i + 1] - ai[i] - 1; /* exclude diag[i] */ /* first, set the diagonal elements */ AAUp[offset] = 1.0 / v[nz]; AALo[offset] = 1.0 / v[nz]; offset += 1; if (nz > 0) { PetscCall(PetscArraycpy(&(AAUp[offset]), v, nz)); for (j = offset; j < offset + nz; j++) { AAUp[j] = -AAUp[j]; AALo[j] = AAUp[j] / v[nz]; } offset += nz; } } PetscCheck(upTriFactor, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors"); PetscCheck(loTriFactor, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors"); upTriFactor->csrMat->values->assign(AAUp, AAUp + a->nz); loTriFactor->csrMat->values->assign(AALo, AALo + a->nz); PetscCall(PetscLogCpuToGpu(2 * (a->nz) * sizeof(PetscScalar))); } PetscCallCUDA(cudaFreeHost(AAUp)); PetscCallCUDA(cudaFreeHost(AALo)); } catch (char *ex) { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex); } } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; IS ip = 
a->row; PetscBool perm_identity; PetscInt n = A->rmap->n; PetscFunctionBegin; PetscCheck(cusparseTriFactors, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cusparseTriFactors"); PetscCall(MatSeqAIJCUSPARSEBuildICCTriMatrices(A)); if (!cusparseTriFactors->workVector) cusparseTriFactors->workVector = new THRUSTARRAY(n); cusparseTriFactors->nnz = (a->nz - n) * 2 + n; A->offloadmask = PETSC_OFFLOAD_BOTH; /* lower triangular indices */ PetscCall(ISIdentity(ip, &perm_identity)); if (!perm_identity) { IS iip; const PetscInt *irip, *rip; PetscCall(ISInvertPermutation(ip, PETSC_DECIDE, &iip)); PetscCall(ISGetIndices(iip, &irip)); PetscCall(ISGetIndices(ip, &rip)); cusparseTriFactors->rpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->rpermIndices->assign(rip, rip + n); cusparseTriFactors->cpermIndices = new THRUSTINTARRAY(n); cusparseTriFactors->cpermIndices->assign(irip, irip + n); PetscCall(ISRestoreIndices(iip, &irip)); PetscCall(ISDestroy(&iip)); PetscCall(ISRestoreIndices(ip, &rip)); PetscCall(PetscLogCpuToGpu(2. * n * sizeof(PetscInt))); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatCholeskyFactorNumeric_SeqAIJCUSPARSE(Mat B, Mat A, const MatFactorInfo *info) { Mat_SeqAIJ *b = (Mat_SeqAIJ *)B->data; IS ip = b->row; PetscBool perm_identity; PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A)); PetscCall(MatCholeskyFactorNumeric_SeqAIJ(B, A, info)); B->offloadmask = PETSC_OFFLOAD_CPU; /* determine which version of MatSolve needs to be used. */ PetscCall(ISIdentity(ip, &perm_identity)); if (perm_identity) { B->ops->solve = MatSolve_SeqAIJCUSPARSE_NaturalOrdering; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering; B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; } else { B->ops->solve = MatSolve_SeqAIJCUSPARSE; B->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE; B->ops->matsolve = NULL; B->ops->matsolvetranspose = NULL; } /* get the triangular factors */ PetscCall(MatSeqAIJCUSPARSEICCAnalysisAndCopyToGPU(B)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(Mat A) { Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT; cusparseIndexBase_t indexBase; cusparseMatrixType_t matrixType; cusparseFillMode_t fillMode; cusparseDiagType_t diagType; PetscFunctionBegin; /* allocate space for the transpose of the lower triangular factor */ PetscCall(PetscNew(&loTriFactorT)); loTriFactorT->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL; /* set the matrix descriptors of the lower triangular factor */ matrixType = cusparseGetMatType(loTriFactor->descr); indexBase = cusparseGetMatIndexBase(loTriFactor->descr); fillMode = cusparseGetMatFillMode(loTriFactor->descr) == CUSPARSE_FILL_MODE_UPPER ? 
CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER; diagType = cusparseGetMatDiagType(loTriFactor->descr); /* Create the matrix description */ PetscCallCUSPARSE(cusparseCreateMatDescr(&loTriFactorT->descr)); PetscCallCUSPARSE(cusparseSetMatIndexBase(loTriFactorT->descr, indexBase)); PetscCallCUSPARSE(cusparseSetMatType(loTriFactorT->descr, matrixType)); PetscCallCUSPARSE(cusparseSetMatFillMode(loTriFactorT->descr, fillMode)); PetscCallCUSPARSE(cusparseSetMatDiagType(loTriFactorT->descr, diagType)); /* set the operation */ loTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* allocate GPU space for the CSC of the lower triangular factor*/ loTriFactorT->csrMat = new CsrMatrix; loTriFactorT->csrMat->num_rows = loTriFactor->csrMat->num_cols; loTriFactorT->csrMat->num_cols = loTriFactor->csrMat->num_rows; loTriFactorT->csrMat->num_entries = loTriFactor->csrMat->num_entries; loTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_rows + 1); loTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(loTriFactorT->csrMat->num_entries); loTriFactorT->csrMat->values = new THRUSTARRAY(loTriFactorT->csrMat->num_entries); /* compute the transpose of the lower triangular factor, i.e. the CSC */ #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCallCUSPARSE(cusparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, CUSPARSE_CSR2CSC_ALG1, &loTriFactor->csr2cscBufferSize)); PetscCallCUDA(cudaMalloc(&loTriFactor->csr2cscBuffer, loTriFactor->csr2cscBufferSize)); #endif PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); { // there is no clean way to have PetscCallCUSPARSE wrapping this function... 
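      /* Converting the CSR form of the lower factor to CSC produces the CSR form of its
         transpose, which is what the transposed solves consume. With CUDA >= 11 this goes
         through cusparseCsr2cscEx2 and needs the buffer sized just above; otherwise the
         legacy csr2csc signature (no buffer, swapped output row/column arguments) is used,
         which is why the argument lists differ between the two preprocessor branches. */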
auto stat = cusparse_csr2csc(cusparseTriFactors->handle, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_cols, loTriFactor->csrMat->num_entries, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactorT->csrMat->values->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, CUSPARSE_CSR2CSC_ALG1, loTriFactor->csr2cscBuffer); #else loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase); #endif PetscCallCUSPARSE(stat); } PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&loTriFactorT->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, &loTriFactorT->solveBufferSize)); PetscCallCUDA(cudaMalloc(&loTriFactorT->solveBuffer, loTriFactorT->solveBufferSize)); #endif /* perform the solve analysis */ PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->loTriFactorPtrTranspose = loTriFactorT; /*********************************************/ /* Now the Transpose of the Upper Tri Factor */ /*********************************************/ /* allocate space for the transpose of the upper triangular factor */ PetscCall(PetscNew(&upTriFactorT)); upTriFactorT->solvePolicy = CUSPARSE_SOLVE_POLICY_USE_LEVEL; /* set the matrix descriptors of the upper triangular factor */ matrixType = cusparseGetMatType(upTriFactor->descr); indexBase = cusparseGetMatIndexBase(upTriFactor->descr); fillMode = cusparseGetMatFillMode(upTriFactor->descr) == CUSPARSE_FILL_MODE_UPPER ?
CUSPARSE_FILL_MODE_LOWER : CUSPARSE_FILL_MODE_UPPER; diagType = cusparseGetMatDiagType(upTriFactor->descr); /* Create the matrix description */ PetscCallCUSPARSE(cusparseCreateMatDescr(&upTriFactorT->descr)); PetscCallCUSPARSE(cusparseSetMatIndexBase(upTriFactorT->descr, indexBase)); PetscCallCUSPARSE(cusparseSetMatType(upTriFactorT->descr, matrixType)); PetscCallCUSPARSE(cusparseSetMatFillMode(upTriFactorT->descr, fillMode)); PetscCallCUSPARSE(cusparseSetMatDiagType(upTriFactorT->descr, diagType)); /* set the operation */ upTriFactorT->solveOp = CUSPARSE_OPERATION_NON_TRANSPOSE; /* allocate GPU space for the CSC of the upper triangular factor*/ upTriFactorT->csrMat = new CsrMatrix; upTriFactorT->csrMat->num_rows = upTriFactor->csrMat->num_cols; upTriFactorT->csrMat->num_cols = upTriFactor->csrMat->num_rows; upTriFactorT->csrMat->num_entries = upTriFactor->csrMat->num_entries; upTriFactorT->csrMat->row_offsets = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_rows + 1); upTriFactorT->csrMat->column_indices = new THRUSTINTARRAY32(upTriFactorT->csrMat->num_entries); upTriFactorT->csrMat->values = new THRUSTARRAY(upTriFactorT->csrMat->num_entries); /* compute the transpose of the upper triangular factor, i.e. the CSC */ #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCallCUSPARSE(cusparseCsr2cscEx2_bufferSize(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, CUSPARSE_CSR2CSC_ALG1, &upTriFactor->csr2cscBufferSize)); PetscCallCUDA(cudaMalloc(&upTriFactor->csr2cscBuffer, upTriFactor->csr2cscBufferSize)); #endif PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); { // there is no clean way to have PetscCallCUSPARSE wrapping this function... 
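      /* Same CSR-to-CSC conversion as for the lower factor above, now applied to the upper
         factor with its own csr2cscBuffer; any change made to one of these two nearly
         identical blocks should be mirrored in the other. */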
auto stat = cusparse_csr2csc(cusparseTriFactors->handle, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_cols, upTriFactor->csrMat->num_entries, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactorT->csrMat->values->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, CUSPARSE_CSR2CSC_ALG1, upTriFactor->csr2cscBuffer); #else upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase); #endif PetscCallCUSPARSE(stat); } PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); /* Create the solve analysis information */ PetscCall(PetscLogEventBegin(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); PetscCallCUSPARSE(cusparseCreateCsrsvInfo(&upTriFactorT->solveInfo)); #if PETSC_PKG_CUDA_VERSION_GE(9, 0, 0) PetscCallCUSPARSE(cusparseXcsrsv_buffsize(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, &upTriFactorT->solveBufferSize)); PetscCallCUDA(cudaMalloc(&upTriFactorT->solveBuffer, upTriFactorT->solveBufferSize)); #endif /* perform the solve analysis */ /* TODO: this solve-analysis setup is repeated several times in this file and should be factored into a helper function */ PetscCallCUSPARSE(cusparseXcsrsv_analysis(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, upTriFactorT->solvePolicy, upTriFactorT->solveBuffer)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSESolveAnalysis, A, 0, 0, 0)); /* assign the pointer */ ((Mat_SeqAIJCUSPARSETriFactors *)A->spptr)->upTriFactorPtrTranspose = upTriFactorT; PetscFunctionReturn(PETSC_SUCCESS); } struct PetscScalarToPetscInt { __host__ __device__ PetscInt operator()(PetscScalar s) { return (PetscInt)PetscRealPart(s); } }; static PetscErrorCode MatSeqAIJCUSPARSEFormExplicitTranspose(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct, *matstructT; Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; cusparseStatus_t stat; cusparseIndexBase_t indexBase; PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat; PetscCheck(matstruct, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing mat struct"); matstructT = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->matTranspose; PetscCheck(!A->transupdated || matstructT, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing matTranspose struct"); if (A->transupdated) PetscFunctionReturn(PETSC_SUCCESS); PetscCall(PetscLogEventBegin(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); PetscCall(PetscLogGpuTimeBegin()); if (cusparsestruct->format != MAT_CUSPARSE_CSR) PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE)); if (!cusparsestruct->matTranspose) { /* create cusparse matrix */ matstructT = new Mat_SeqAIJCUSPARSEMultStruct;
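    /* A fresh mult struct for the transpose is populated below: a general (non-triangular)
       matrix descriptor, device-side copies of the alpha/beta constants used by SpMV, and,
       for the CSR format, a CsrMatrix for A^T with A->cmap->n rows and A->rmap->n columns.
       Its numerical values are filled in later by the csr2csc/permutation update at the
       bottom of this function. */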
PetscCallCUSPARSE(cusparseCreateMatDescr(&matstructT->descr)); indexBase = cusparseGetMatIndexBase(matstruct->descr); PetscCallCUSPARSE(cusparseSetMatIndexBase(matstructT->descr, indexBase)); PetscCallCUSPARSE(cusparseSetMatType(matstructT->descr, CUSPARSE_MATRIX_TYPE_GENERAL)); /* set alpha and beta */ PetscCallCUDA(cudaMalloc((void **)&(matstructT->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(matstructT->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(matstructT->beta_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMemcpy(matstructT->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(matstructT->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(matstructT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); if (cusparsestruct->format == MAT_CUSPARSE_CSR) { CsrMatrix *matrixT = new CsrMatrix; matstructT->mat = matrixT; matrixT->num_rows = A->cmap->n; matrixT->num_cols = A->rmap->n; matrixT->num_entries = a->nz; matrixT->row_offsets = new THRUSTINTARRAY32(matrixT->num_rows + 1); matrixT->column_indices = new THRUSTINTARRAY32(a->nz); matrixT->values = new THRUSTARRAY(a->nz); if (!cusparsestruct->rowoffsets_gpu) cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); cusparsestruct->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) #if PETSC_PKG_CUDA_VERSION_GE(11, 2, 1) stat = cusparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, /* row offset, col idx type due to THRUSTINTARRAY32 */ indexBase, cusparse_scalartype); PetscCallCUSPARSE(stat); #else /* cusparse-11.x returns errors with zero-sized matrices until 11.2.1, see https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cusparse-11.2.1 I don't know what a proper value should be for matstructT->matDescr with empty matrices, so I just set it to NULL to blow it up if one relies on it. Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[] should be filled with indexBase. So I also set it accordingly. 
*/ if (matrixT->num_entries) { stat = cusparseCreateCsr(&matstructT->matDescr, matrixT->num_rows, matrixT->num_cols, matrixT->num_entries, matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), matrixT->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, indexBase, cusparse_scalartype); PetscCallCUSPARSE(stat); } else { matstructT->matDescr = NULL; matrixT->row_offsets->assign(matrixT->row_offsets->size(), indexBase); } #endif #endif } else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else CsrMatrix *temp = new CsrMatrix; CsrMatrix *tempT = new CsrMatrix; /* First convert HYB to CSR */ temp->num_rows = A->rmap->n; temp->num_cols = A->cmap->n; temp->num_entries = a->nz; temp->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1); temp->column_indices = new THRUSTINTARRAY32(a->nz); temp->values = new THRUSTARRAY(a->nz); stat = cusparse_hyb2csr(cusparsestruct->handle, matstruct->descr, (cusparseHybMat_t)matstruct->mat, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get()); PetscCallCUSPARSE(stat); /* Next, convert CSR to CSC (i.e. the matrix transpose) */ tempT->num_rows = A->rmap->n; tempT->num_cols = A->cmap->n; tempT->num_entries = a->nz; tempT->row_offsets = new THRUSTINTARRAY32(A->rmap->n + 1); tempT->column_indices = new THRUSTINTARRAY32(a->nz); tempT->values = new THRUSTARRAY(a->nz); stat = cusparse_csr2csc(cusparsestruct->handle, temp->num_rows, temp->num_cols, temp->num_entries, temp->values->data().get(), temp->row_offsets->data().get(), temp->column_indices->data().get(), tempT->values->data().get(), tempT->column_indices->data().get(), tempT->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase); PetscCallCUSPARSE(stat); /* Last, convert CSC to HYB */ cusparseHybMat_t hybMat; PetscCallCUSPARSE(cusparseCreateHybMat(&hybMat)); cusparseHybPartition_t partition = cusparsestruct->format == MAT_CUSPARSE_ELL ? 
CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, A->rmap->n, A->cmap->n, matstructT->descr, tempT->values->data().get(), tempT->row_offsets->data().get(), tempT->column_indices->data().get(), hybMat, 0, partition); PetscCallCUSPARSE(stat); /* assign the pointer */ matstructT->mat = hybMat; A->transupdated = PETSC_TRUE; /* delete temporaries */ if (tempT) { if (tempT->values) delete (THRUSTARRAY *)tempT->values; if (tempT->column_indices) delete (THRUSTINTARRAY32 *)tempT->column_indices; if (tempT->row_offsets) delete (THRUSTINTARRAY32 *)tempT->row_offsets; delete (CsrMatrix *)tempT; } if (temp) { if (temp->values) delete (THRUSTARRAY *)temp->values; if (temp->column_indices) delete (THRUSTINTARRAY32 *)temp->column_indices; if (temp->row_offsets) delete (THRUSTINTARRAY32 *)temp->row_offsets; delete (CsrMatrix *)temp; } #endif } } if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* transpose mat struct may be already present, update data */ CsrMatrix *matrix = (CsrMatrix *)matstruct->mat; CsrMatrix *matrixT = (CsrMatrix *)matstructT->mat; PetscCheck(matrix, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix"); PetscCheck(matrix->row_offsets, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix rows"); PetscCheck(matrix->column_indices, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix cols"); PetscCheck(matrix->values, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrix values"); PetscCheck(matrixT, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT"); PetscCheck(matrixT->row_offsets, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT rows"); PetscCheck(matrixT->column_indices, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT cols"); PetscCheck(matrixT->values, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CsrMatrixT values"); if (!cusparsestruct->rowoffsets_gpu) { /* this may be absent when we did not construct the transpose with csr2csc */ cusparsestruct->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); cusparsestruct->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1); PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt))); } if (!cusparsestruct->csr2csc_i) { THRUSTARRAY csr2csc_a(matrix->num_entries); PetscCallThrust(thrust::sequence(thrust::device, csr2csc_a.begin(), csr2csc_a.end(), 0.0)); indexBase = cusparseGetMatIndexBase(matstruct->descr); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) void *csr2cscBuffer; size_t csr2cscBufferSize; stat = cusparseCsr2cscEx2_bufferSize(cusparsestruct->handle, A->rmap->n, A->cmap->n, matrix->num_entries, matrix->values->data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, cusparsestruct->csr2cscAlg, &csr2cscBufferSize); PetscCallCUSPARSE(stat); PetscCallCUDA(cudaMalloc(&csr2cscBuffer, csr2cscBufferSize)); #endif if (matrix->num_entries) { /* When there are no nonzeros, this routine mistakenly returns CUSPARSE_STATUS_INVALID_VALUE in mat_tests-ex62_15_mpiaijcusparse on ranks 0 and 2 with CUDA-11. But CUDA-10 is OK. I checked every parameters and they were just fine. I have no clue why cusparse complains. Per https://docs.nvidia.com/cuda/cusparse/index.html#csr2cscEx2, when nnz = 0, matrixT->row_offsets[] should be filled with indexBase. So I just take a shortcut here. 
*/ stat = cusparse_csr2csc(cusparsestruct->handle, A->rmap->n, A->cmap->n, matrix->num_entries, csr2csc_a.data().get(), cusparsestruct->rowoffsets_gpu->data().get(), matrix->column_indices->data().get(), matrixT->values->data().get(), #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) matrixT->row_offsets->data().get(), matrixT->column_indices->data().get(), cusparse_scalartype, CUSPARSE_ACTION_NUMERIC, indexBase, cusparsestruct->csr2cscAlg, csr2cscBuffer); PetscCallCUSPARSE(stat); #else matrixT->column_indices->data().get(), matrixT->row_offsets->data().get(), CUSPARSE_ACTION_NUMERIC, indexBase); PetscCallCUSPARSE(stat); #endif } else { matrixT->row_offsets->assign(matrixT->row_offsets->size(), indexBase); } cusparsestruct->csr2csc_i = new THRUSTINTARRAY(matrix->num_entries); PetscCallThrust(thrust::transform(thrust::device, matrixT->values->begin(), matrixT->values->end(), cusparsestruct->csr2csc_i->begin(), PetscScalarToPetscInt())); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCallCUDA(cudaFree(csr2cscBuffer)); #endif } PetscCallThrust( thrust::copy(thrust::device, thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->begin()), thrust::make_permutation_iterator(matrix->values->begin(), cusparsestruct->csr2csc_i->end()), matrixT->values->begin())); } PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogEventEnd(MAT_CUSPARSEGenerateTranspose, A, 0, 0, 0)); /* the compressed row indices is not used for matTranspose */ matstructT->cprowIndices = NULL; /* assign the pointer */ ((Mat_SeqAIJCUSPARSE *)A->spptr)->matTranspose = matstructT; A->transupdated = PETSC_TRUE; PetscFunctionReturn(PETSC_SUCCESS); } /* Why do we need to analyze the transposed matrix again? Can't we just use op(A) = CUSPARSE_OPERATION_TRANSPOSE in MatSolve_SeqAIJCUSPARSE? */ static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE(Mat A, Vec bb, Vec xx) { PetscInt n = xx->map->n; const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector; PetscFunctionBegin; /* Analyze the matrix and create the transpose ... 
on the fly */ if (!loTriFactorT && !upTriFactorT) { PetscCall(MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A)); loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose; upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose; } /* Get the GPU pointers */ PetscCall(VecCUDAGetArrayWrite(xx, &xarray)); PetscCall(VecCUDAGetArrayRead(bb, &barray)); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); PetscCall(PetscLogGpuTimeBegin()); /* First, reorder with the row permutation */ thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU + n, cusparseTriFactors->rpermIndices->end()), xGPU); /* First, solve U */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, xarray, tempGPU->data().get(), upTriFactorT->solvePolicy, upTriFactorT->solveBuffer)); /* Then, solve L */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer)); /* Last, copy the solution, xGPU, into a temporary with the column permutation ... can't be done in place. */ thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(xGPU, cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(xGPU + n, cusparseTriFactors->cpermIndices->end()), tempGPU->begin()); /* Copy the temporary to the full solution. */ thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), tempGPU->begin(), tempGPU->end(), xGPU); /* restore */ PetscCall(VecCUDARestoreArrayRead(bb, &barray)); PetscCall(VecCUDARestoreArrayWrite(xx, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_NaturalOrdering(Mat A, Vec bb, Vec xx) { const PetscScalar *barray; PetscScalar *xarray; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose; THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector; PetscFunctionBegin; /* Analyze the matrix and create the transpose ... 
on the fly */ if (!loTriFactorT && !upTriFactorT) { PetscCall(MatSeqAIJCUSPARSEAnalyzeTransposeForSolve(A)); loTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtrTranspose; upTriFactorT = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtrTranspose; } /* Get the GPU pointers */ PetscCall(VecCUDAGetArrayWrite(xx, &xarray)); PetscCall(VecCUDAGetArrayRead(bb, &barray)); PetscCall(PetscLogGpuTimeBegin()); /* First, solve U */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactorT->solveOp, upTriFactorT->csrMat->num_rows, upTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactorT->descr, upTriFactorT->csrMat->values->data().get(), upTriFactorT->csrMat->row_offsets->data().get(), upTriFactorT->csrMat->column_indices->data().get(), upTriFactorT->solveInfo, barray, tempGPU->data().get(), upTriFactorT->solvePolicy, upTriFactorT->solveBuffer)); /* Then, solve L */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactorT->solveOp, loTriFactorT->csrMat->num_rows, loTriFactorT->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactorT->descr, loTriFactorT->csrMat->values->data().get(), loTriFactorT->csrMat->row_offsets->data().get(), loTriFactorT->csrMat->column_indices->data().get(), loTriFactorT->solveInfo, tempGPU->data().get(), xarray, loTriFactorT->solvePolicy, loTriFactorT->solveBuffer)); /* restore */ PetscCall(VecCUDARestoreArrayRead(bb, &barray)); PetscCall(VecCUDARestoreArrayWrite(xx, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSolve_SeqAIJCUSPARSE(Mat A, Vec bb, Vec xx) { const PetscScalar *barray; PetscScalar *xarray; thrust::device_ptr<const PetscScalar> bGPU; thrust::device_ptr<PetscScalar> xGPU; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr; THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector; PetscFunctionBegin; /* Get the GPU pointers */ PetscCall(VecCUDAGetArrayWrite(xx, &xarray)); PetscCall(VecCUDAGetArrayRead(bb, &barray)); xGPU = thrust::device_pointer_cast(xarray); bGPU = thrust::device_pointer_cast(barray); PetscCall(PetscLogGpuTimeBegin()); /* First, reorder with the row permutation */ thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->begin()), thrust::make_permutation_iterator(bGPU, cusparseTriFactors->rpermIndices->end()), tempGPU->begin()); /* Next, solve L */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, tempGPU->data().get(), xarray, loTriFactor->solvePolicy, loTriFactor->solveBuffer)); /* Then, solve U */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), 
upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, xarray, tempGPU->data().get(), upTriFactor->solvePolicy, upTriFactor->solveBuffer)); /* Last, reorder with the column permutation */ thrust::copy(thrust::cuda::par.on(PetscDefaultCudaStream), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->begin()), thrust::make_permutation_iterator(tempGPU->begin(), cusparseTriFactors->cpermIndices->end()), xGPU); PetscCall(VecCUDARestoreArrayRead(bb, &barray)); PetscCall(VecCUDARestoreArrayWrite(xx, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSolve_SeqAIJCUSPARSE_NaturalOrdering(Mat A, Vec bb, Vec xx) { const PetscScalar *barray; PetscScalar *xarray; Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; Mat_SeqAIJCUSPARSETriFactorStruct *loTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->loTriFactorPtr; Mat_SeqAIJCUSPARSETriFactorStruct *upTriFactor = (Mat_SeqAIJCUSPARSETriFactorStruct *)cusparseTriFactors->upTriFactorPtr; THRUSTARRAY *tempGPU = (THRUSTARRAY *)cusparseTriFactors->workVector; PetscFunctionBegin; /* Get the GPU pointers */ PetscCall(VecCUDAGetArrayWrite(xx, &xarray)); PetscCall(VecCUDAGetArrayRead(bb, &barray)); PetscCall(PetscLogGpuTimeBegin()); /* First, solve L */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, loTriFactor->solveOp, loTriFactor->csrMat->num_rows, loTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, loTriFactor->descr, loTriFactor->csrMat->values->data().get(), loTriFactor->csrMat->row_offsets->data().get(), loTriFactor->csrMat->column_indices->data().get(), loTriFactor->solveInfo, barray, tempGPU->data().get(), loTriFactor->solvePolicy, loTriFactor->solveBuffer)); /* Next, solve U */ PetscCallCUSPARSE(cusparseXcsrsv_solve(cusparseTriFactors->handle, upTriFactor->solveOp, upTriFactor->csrMat->num_rows, upTriFactor->csrMat->num_entries, &PETSC_CUSPARSE_ONE, upTriFactor->descr, upTriFactor->csrMat->values->data().get(), upTriFactor->csrMat->row_offsets->data().get(), upTriFactor->csrMat->column_indices->data().get(), upTriFactor->solveInfo, tempGPU->data().get(), xarray, upTriFactor->solvePolicy, upTriFactor->solveBuffer)); PetscCall(VecCUDARestoreArrayRead(bb, &barray)); PetscCall(VecCUDARestoreArrayWrite(xx, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * cusparseTriFactors->nnz - A->cmap->n)); PetscFunctionReturn(PETSC_SUCCESS); } #if CUSPARSE_VERSION >= 11500 /* cusparseSpSV_solve() and friends first appeared in cusparse-11.3 */ static PetscErrorCode MatSolve_SeqAIJCUSPARSE_ILU0(Mat fact, Vec b, Vec x) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; const PetscScalar *barray; PetscScalar *xarray; PetscFunctionBegin; PetscCall(VecCUDAGetArrayWrite(x, &xarray)); PetscCall(VecCUDAGetArrayRead(b, &barray)); PetscCall(PetscLogGpuTimeBegin()); /* Solve L*y = b */ PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray)); PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y)); PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* L Y = X */ fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L)); // 
cusparseSpSV_solve() secretly uses the external buffer used in cusparseSpSV_analysis()! /* Solve U*x = y */ PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, xarray)); PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, /* U X = Y */ fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U)); PetscCall(VecCUDARestoreArrayRead(b, &barray)); PetscCall(VecCUDARestoreArrayWrite(x, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * aij->nz - fact->rmap->n)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSolveTranspose_SeqAIJCUSPARSE_ILU0(Mat fact, Vec b, Vec x) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; const PetscScalar *barray; PetscScalar *xarray; PetscFunctionBegin; if (!fs->createdTransposeSpSVDescr) { /* First call to MatSolveTranspose(); create the transpose-solve descriptors */ PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Lt)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* The matrix is still L. We only do transpose solve with it */ fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt, &fs->spsvBufferSize_Lt)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Ut)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Ut, &fs->spsvBufferSize_Ut)); PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_Lt, fs->spsvBufferSize_Lt)); PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_Ut, fs->spsvBufferSize_Ut)); fs->createdTransposeSpSVDescr = PETSC_TRUE; } if (!fs->updatedTransposeSpSVAnalysis) { PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt, fs->spsvBuffer_Lt)); PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Ut, fs->spsvBuffer_Ut)); fs->updatedTransposeSpSVAnalysis = PETSC_TRUE; } PetscCall(VecCUDAGetArrayWrite(x, &xarray)); PetscCall(VecCUDAGetArrayRead(b, &barray)); PetscCall(PetscLogGpuTimeBegin()); /* Solve Ut*y = b */ PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray)); PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y)); PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, /* Ut Y = X */ fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Ut)); /* Solve Lt*x = y */ PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, xarray)); PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* Lt X = Y */ fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt)); PetscCall(VecCUDARestoreArrayRead(b, &barray)); PetscCall(VecCUDARestoreArrayWrite(x, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * aij->nz - fact->rmap->n));
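  /* Flop note: the estimate above charges roughly a multiply-add (2 flops) per stored
     off-diagonal of the combined L/U factor plus one division per diagonal entry, i.e.
     2*(nz - n) + n = 2*nz - n, matching the 2.0*nnz - n convention used by the other
     MatSolve variants in this file. */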
PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatILUFactorNumeric_SeqAIJCUSPARSE_ILU0(Mat fact, Mat A, const MatFactorInfo *) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr; CsrMatrix *Acsr; PetscInt m, nz; PetscBool flg; PetscFunctionBegin; if (PetscDefined(USE_DEBUG)) { PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name); } /* Copy A's value to fact */ m = fact->rmap->n; nz = aij->nz; PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); Acsr = (CsrMatrix *)Acusp->mat->mat; PetscCallCUDA(cudaMemcpyAsync(fs->csrVal, Acsr->values->data().get(), sizeof(PetscScalar) * nz, cudaMemcpyDeviceToDevice, PetscDefaultCudaStream)); /* Factorize fact inplace */ if (m) PetscCallCUSPARSE(cusparseXcsrilu02(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */ fs->matDescr_M, fs->csrVal, fs->csrRowPtr, fs->csrColIdx, fs->ilu0Info_M, fs->policy_M, fs->factBuffer_M)); if (PetscDefined(USE_DEBUG)) { int numerical_zero; cusparseStatus_t status; status = cusparseXcsrilu02_zeroPivot(fs->handle, fs->ilu0Info_M, &numerical_zero); PetscAssert(CUSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Numerical zero pivot detected in csrilu02: A(%d,%d) is zero", numerical_zero, numerical_zero); } /* cusparseSpSV_analysis() is numeric, i.e., it requires valid matrix values, therefore, we do it after cusparseXcsrilu02() See discussion at https://github.com/NVIDIA/CUDALibrarySamples/issues/78 */ PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L)); PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, fs->spsvBuffer_U)); /* L, U values have changed, reset the flag to indicate we need to redo cusparseSpSV_analysis() for transpose solve */ fs->updatedTransposeSpSVAnalysis = PETSC_FALSE; fact->offloadmask = PETSC_OFFLOAD_GPU; fact->ops->solve = MatSolve_SeqAIJCUSPARSE_ILU0; fact->ops->solvetranspose = MatSolveTranspose_SeqAIJCUSPARSE_ILU0; fact->ops->matsolve = NULL; fact->ops->matsolvetranspose = NULL; PetscCall(PetscLogGpuFlops(fs->numericFactFlops)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0(Mat fact, Mat A, IS, IS, const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; PetscInt m, nz; PetscFunctionBegin; if (PetscDefined(USE_DEBUG)) { PetscInt i; PetscBool flg, missing; PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name); PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Must be square matrix, rows %" PetscInt_FMT " columns %" PetscInt_FMT, A->rmap->n, A->cmap->n); PetscCall(MatMissingDiagonal(A, &missing, &i)); PetscCheck(!missing, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, 
"Matrix is missing diagonal entry %" PetscInt_FMT, i); } /* Free the old stale stuff */ PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&fs)); /* Copy over A's meta data to fact. Note that we also allocated fact's i,j,a on host, but they will not be used. Allocate them just for easy debugging. */ PetscCall(MatDuplicateNoCreate_SeqAIJ(fact, A, MAT_DO_NOT_COPY_VALUES, PETSC_TRUE /*malloc*/)); fact->offloadmask = PETSC_OFFLOAD_BOTH; fact->factortype = MAT_FACTOR_ILU; fact->info.factor_mallocs = 0; fact->info.fill_ratio_given = info->fill; fact->info.fill_ratio_needed = 1.0; aij->row = NULL; aij->col = NULL; /* ====================================================================== */ /* Copy A's i, j to fact and also allocate the value array of fact. */ /* We'll do in-place factorization on fact */ /* ====================================================================== */ const int *Ai, *Aj; m = fact->rmap->n; nz = aij->nz; PetscCallCUDA(cudaMalloc((void **)&fs->csrRowPtr, sizeof(int) * (m + 1))); PetscCallCUDA(cudaMalloc((void **)&fs->csrColIdx, sizeof(int) * nz)); PetscCallCUDA(cudaMalloc((void **)&fs->csrVal, sizeof(PetscScalar) * nz)); PetscCall(MatSeqAIJCUSPARSEGetIJ(A, PETSC_FALSE, &Ai, &Aj)); /* Do not use compressed Ai */ PetscCallCUDA(cudaMemcpyAsync(fs->csrRowPtr, Ai, sizeof(int) * (m + 1), cudaMemcpyDeviceToDevice, PetscDefaultCudaStream)); PetscCallCUDA(cudaMemcpyAsync(fs->csrColIdx, Aj, sizeof(int) * nz, cudaMemcpyDeviceToDevice, PetscDefaultCudaStream)); /* ====================================================================== */ /* Create descriptors for M, L, U */ /* ====================================================================== */ cusparseFillMode_t fillMode; cusparseDiagType_t diagType; PetscCallCUSPARSE(cusparseCreateMatDescr(&fs->matDescr_M)); PetscCallCUSPARSE(cusparseSetMatIndexBase(fs->matDescr_M, CUSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(cusparseSetMatType(fs->matDescr_M, CUSPARSE_MATRIX_TYPE_GENERAL)); /* https://docs.nvidia.com/cuda/cusparse/index.html#cusparseDiagType_t cusparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always assumed to be present, but if CUSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory. 
*/ fillMode = CUSPARSE_FILL_MODE_LOWER; diagType = CUSPARSE_DIAG_TYPE_UNIT; PetscCallCUSPARSE(cusparseCreateCsr(&fs->spMatDescr_L, m, m, nz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype)); PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_L, CUSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode))); PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_L, CUSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType))); fillMode = CUSPARSE_FILL_MODE_UPPER; diagType = CUSPARSE_DIAG_TYPE_NON_UNIT; PetscCallCUSPARSE(cusparseCreateCsr(&fs->spMatDescr_U, m, m, nz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype)); PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_U, CUSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode))); PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_U, CUSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType))); /* ========================================================================= */ /* Query buffer sizes for csrilu0, SpSV and allocate buffers */ /* ========================================================================= */ PetscCallCUSPARSE(cusparseCreateCsrilu02Info(&fs->ilu0Info_M)); if (m) PetscCallCUSPARSE(cusparseXcsrilu02_bufferSize(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */ fs->matDescr_M, fs->csrVal, fs->csrRowPtr, fs->csrColIdx, fs->ilu0Info_M, &fs->factBufferSize_M)); PetscCallCUDA(cudaMalloc((void **)&fs->X, sizeof(PetscScalar) * m)); PetscCallCUDA(cudaMalloc((void **)&fs->Y, sizeof(PetscScalar) * m)); PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype)); PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_L)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_U)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_U, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_U, &fs->spsvBufferSize_U)); /* From my experiment with the example at https://github.com/NVIDIA/CUDALibrarySamples/tree/master/cuSPARSE/bicgstab, and discussion at https://github.com/NVIDIA/CUDALibrarySamples/issues/77, spsvBuffer_L/U can not be shared (i.e., the same) for our case, but factBuffer_M can share with either of spsvBuffer_L/U. To save memory, we make factBuffer_M share with the bigger of spsvBuffer_L/U. 
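     A small worked example with made-up sizes: if spsvBufferSize_L = 6 MB, spsvBufferSize_U = 4 MB and
     factBufferSize_M = 5 MB, the branch below allocates factBuffer_M = max(6 MB, 5 MB) = 6 MB, points
     spsvBuffer_L at it, and allocates a separate 4 MB spsvBuffer_U: 10 MB in total instead of the 15 MB
     needed with three separate buffers.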
*/ if (fs->spsvBufferSize_L > fs->spsvBufferSize_U) { PetscCallCUDA(cudaMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_L, (size_t)fs->factBufferSize_M))); fs->spsvBuffer_L = fs->factBuffer_M; PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_U, fs->spsvBufferSize_U)); } else { PetscCallCUDA(cudaMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_U, (size_t)fs->factBufferSize_M))); fs->spsvBuffer_U = fs->factBuffer_M; PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_L, fs->spsvBufferSize_L)); } /* ========================================================================== */ /* Perform analysis of ilu0 on M, SpSv on L and U */ /* The lower(upper) triangular part of M has the same sparsity pattern as L(U)*/ /* ========================================================================== */ int structural_zero; cusparseStatus_t status; fs->policy_M = CUSPARSE_SOLVE_POLICY_USE_LEVEL; if (m) PetscCallCUSPARSE(cusparseXcsrilu02_analysis(fs->handle, m, nz, /* cusparseXcsrilu02 errors out with empty matrices (m=0) */ fs->matDescr_M, fs->csrVal, fs->csrRowPtr, fs->csrColIdx, fs->ilu0Info_M, fs->policy_M, fs->factBuffer_M)); if (PetscDefined(USE_DEBUG)) { /* Function cusparseXcsrilu02_zeroPivot() is a blocking call. It calls cudaDeviceSynchronize() to make sure all previous kernels are done. */ status = cusparseXcsrilu02_zeroPivot(fs->handle, fs->ilu0Info_M, &structural_zero); PetscCheck(CUSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Structural zero pivot detected in csrilu02: A(%d,%d) is missing", structural_zero, structural_zero); } /* Estimate FLOPs of the numeric factorization */ { Mat_SeqAIJ *Aseq = (Mat_SeqAIJ *)A->data; PetscInt *Ai, *Adiag, nzRow, nzLeft; PetscLogDouble flops = 0.0; PetscCall(MatMarkDiagonal_SeqAIJ(A)); Ai = Aseq->i; Adiag = Aseq->diag; for (PetscInt i = 0; i < m; i++) { if (Ai[i] < Adiag[i] && Adiag[i] < Ai[i + 1]) { /* There are nonzeros left to the diagonal of row i */ nzRow = Ai[i + 1] - Ai[i]; nzLeft = Adiag[i] - Ai[i]; /* We want to eliminate nonzeros left to the diagonal one by one. Assume each time, nonzeros right and include the eliminated one will be updated, which incurs a multiplication and an addition. 
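     With the estimate used below, a made-up row with nzRow = 5 gives nzLeft = (5 - 1) / 2 = 2 and
     contributes 2 * (2 * 5 - 2 + 1) = 18 flops.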
*/ nzLeft = (nzRow - 1) / 2; flops += nzLeft * (2.0 * nzRow - nzLeft + 1); } } fs->numericFactFlops = flops; } fact->ops->lufactornumeric = MatILUFactorNumeric_SeqAIJCUSPARSE_ILU0; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSolve_SeqAIJCUSPARSE_ICC0(Mat fact, Vec b, Vec x) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; const PetscScalar *barray; PetscScalar *xarray; PetscFunctionBegin; PetscCall(VecCUDAGetArrayWrite(x, &xarray)); PetscCall(VecCUDAGetArrayRead(b, &barray)); PetscCall(PetscLogGpuTimeBegin()); /* Solve L*y = b */ PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, (void *)barray)); PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_Y, fs->Y)); PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* L Y = X */ fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L)); /* Solve Lt*x = y */ PetscCallCUSPARSE(cusparseDnVecSetValues(fs->dnVecDescr_X, xarray)); PetscCallCUSPARSE(cusparseSpSV_solve(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, /* Lt X = Y */ fs->dnVecDescr_Y, fs->dnVecDescr_X, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt)); PetscCall(VecCUDARestoreArrayRead(b, &barray)); PetscCall(VecCUDARestoreArrayWrite(x, &xarray)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(2.0 * aij->nz - fact->rmap->n)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatICCFactorNumeric_SeqAIJCUSPARSE_ICC0(Mat fact, Mat A, const MatFactorInfo *) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr; CsrMatrix *Acsr; PetscInt m, nz; PetscBool flg; PetscFunctionBegin; if (PetscDefined(USE_DEBUG)) { PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name); } /* Copy A's value to fact */ m = fact->rmap->n; nz = aij->nz; PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); Acsr = (CsrMatrix *)Acusp->mat->mat; PetscCallCUDA(cudaMemcpyAsync(fs->csrVal, Acsr->values->data().get(), sizeof(PetscScalar) * nz, cudaMemcpyDeviceToDevice, PetscDefaultCudaStream)); /* Factorize fact inplace */ /* https://docs.nvidia.com/cuda/cusparse/index.html#csric02_solve Function csric02() only takes the lower triangular part of matrix A to perform factorization. The matrix type must be CUSPARSE_MATRIX_TYPE_GENERAL, the fill mode and diagonal type are ignored, and the strictly upper triangular part is ignored and never touched. It does not matter if A is Hermitian or not. In other words, from the point of view of csric02() A is Hermitian and only the lower triangular part is provided. 
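     Put differently (a summary of the code below, not of the cuSPARSE documentation): csric02() overwrites the
     lower triangular part of csrVal with the incomplete Cholesky factor L of A ~ L*L^H; the SpSV descriptors
     created in the symbolic phase are then used to solve with L and L^T in MatSolve_SeqAIJCUSPARSE_ICC0() above.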
*/ if (m) PetscCallCUSPARSE(cusparseXcsric02(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr, fs->csrColIdx, fs->ic0Info_M, fs->policy_M, fs->factBuffer_M)); if (PetscDefined(USE_DEBUG)) { int numerical_zero; cusparseStatus_t status; status = cusparseXcsric02_zeroPivot(fs->handle, fs->ic0Info_M, &numerical_zero); PetscAssert(CUSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Numerical zero pivot detected in csric02: A(%d,%d) is zero", numerical_zero, numerical_zero); } PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, fs->spsvBuffer_L)); /* Note that cusparse reports this error if we use double and CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE ** On entry to cusparseSpSV_analysis(): conjugate transpose (opA) is not supported for matA data type, current -> CUDA_R_64F */ PetscCallCUSPARSE(cusparseSpSV_analysis(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt, fs->spsvBuffer_Lt)); fact->offloadmask = PETSC_OFFLOAD_GPU; fact->ops->solve = MatSolve_SeqAIJCUSPARSE_ICC0; fact->ops->solvetranspose = MatSolve_SeqAIJCUSPARSE_ICC0; fact->ops->matsolve = NULL; fact->ops->matsolvetranspose = NULL; PetscCall(PetscLogGpuFlops(fs->numericFactFlops)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE_ICC0(Mat fact, Mat A, IS, const MatFactorInfo *info) { Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)fact->spptr; Mat_SeqAIJ *aij = (Mat_SeqAIJ *)fact->data; PetscInt m, nz; PetscFunctionBegin; if (PetscDefined(USE_DEBUG)) { PetscInt i; PetscBool flg, missing; PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Expected MATSEQAIJCUSPARSE, but input is %s", ((PetscObject)A)->type_name); PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Must be square matrix, rows %" PetscInt_FMT " columns %" PetscInt_FMT, A->rmap->n, A->cmap->n); PetscCall(MatMissingDiagonal(A, &missing, &i)); PetscCheck(!missing, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "Matrix is missing diagonal entry %" PetscInt_FMT, i); } /* Free the old stale stuff */ PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&fs)); /* Copy over A's meta data to fact. Note that we also allocated fact's i,j,a on host, but they will not be used. Allocate them just for easy debugging. */ PetscCall(MatDuplicateNoCreate_SeqAIJ(fact, A, MAT_DO_NOT_COPY_VALUES, PETSC_TRUE /*malloc*/)); fact->offloadmask = PETSC_OFFLOAD_BOTH; fact->factortype = MAT_FACTOR_ICC; fact->info.factor_mallocs = 0; fact->info.fill_ratio_given = info->fill; fact->info.fill_ratio_needed = 1.0; aij->row = NULL; aij->col = NULL; /* ====================================================================== */ /* Copy A's i, j to fact and also allocate the value array of fact. 
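     (The row pointer and column indices are copied only once here; subsequent calls to
     MatICCFactorNumeric_SeqAIJCUSPARSE_ICC0() refresh just the values in csrVal.)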
*/ /* We'll do in-place factorization on fact */ /* ====================================================================== */ const int *Ai, *Aj; m = fact->rmap->n; nz = aij->nz; PetscCallCUDA(cudaMalloc((void **)&fs->csrRowPtr, sizeof(int) * (m + 1))); PetscCallCUDA(cudaMalloc((void **)&fs->csrColIdx, sizeof(int) * nz)); PetscCallCUDA(cudaMalloc((void **)&fs->csrVal, sizeof(PetscScalar) * nz)); PetscCall(MatSeqAIJCUSPARSEGetIJ(A, PETSC_FALSE, &Ai, &Aj)); /* Do not use compressed Ai */ PetscCallCUDA(cudaMemcpyAsync(fs->csrRowPtr, Ai, sizeof(int) * (m + 1), cudaMemcpyDeviceToDevice, PetscDefaultCudaStream)); PetscCallCUDA(cudaMemcpyAsync(fs->csrColIdx, Aj, sizeof(int) * nz, cudaMemcpyDeviceToDevice, PetscDefaultCudaStream)); /* ====================================================================== */ /* Create mat descriptors for M, L */ /* ====================================================================== */ cusparseFillMode_t fillMode; cusparseDiagType_t diagType; PetscCallCUSPARSE(cusparseCreateMatDescr(&fs->matDescr_M)); PetscCallCUSPARSE(cusparseSetMatIndexBase(fs->matDescr_M, CUSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(cusparseSetMatType(fs->matDescr_M, CUSPARSE_MATRIX_TYPE_GENERAL)); /* https://docs.nvidia.com/cuda/cusparse/index.html#cusparseDiagType_t cusparseDiagType_t: This type indicates if the matrix diagonal entries are unity. The diagonal elements are always assumed to be present, but if CUSPARSE_DIAG_TYPE_UNIT is passed to an API routine, then the routine assumes that all diagonal entries are unity and will not read or modify those entries. Note that in this case the routine assumes the diagonal entries are equal to one, regardless of what those entries are actually set to in memory. */ fillMode = CUSPARSE_FILL_MODE_LOWER; diagType = CUSPARSE_DIAG_TYPE_NON_UNIT; PetscCallCUSPARSE(cusparseCreateCsr(&fs->spMatDescr_L, m, m, nz, fs->csrRowPtr, fs->csrColIdx, fs->csrVal, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype)); PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_L, CUSPARSE_SPMAT_FILL_MODE, &fillMode, sizeof(fillMode))); PetscCallCUSPARSE(cusparseSpMatSetAttribute(fs->spMatDescr_L, CUSPARSE_SPMAT_DIAG_TYPE, &diagType, sizeof(diagType))); /* ========================================================================= */ /* Query buffer sizes for csric0, SpSV of L and Lt, and allocate buffers */ /* ========================================================================= */ PetscCallCUSPARSE(cusparseCreateCsric02Info(&fs->ic0Info_M)); if (m) PetscCallCUSPARSE(cusparseXcsric02_bufferSize(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr, fs->csrColIdx, fs->ic0Info_M, &fs->factBufferSize_M)); PetscCallCUDA(cudaMalloc((void **)&fs->X, sizeof(PetscScalar) * m)); PetscCallCUDA(cudaMalloc((void **)&fs->Y, sizeof(PetscScalar) * m)); PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_X, m, fs->X, cusparse_scalartype)); PetscCallCUSPARSE(cusparseCreateDnVec(&fs->dnVecDescr_Y, m, fs->Y, cusparse_scalartype)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_L)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_NON_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_L, &fs->spsvBufferSize_L)); PetscCallCUSPARSE(cusparseSpSV_createDescr(&fs->spsvDescr_Lt)); PetscCallCUSPARSE(cusparseSpSV_bufferSize(fs->handle, CUSPARSE_OPERATION_TRANSPOSE, &PETSC_CUSPARSE_ONE, fs->spMatDescr_L, fs->dnVecDescr_X, 
fs->dnVecDescr_Y, cusparse_scalartype, CUSPARSE_SPSV_ALG_DEFAULT, fs->spsvDescr_Lt, &fs->spsvBufferSize_Lt)); /* To save device memory, we make the factorization buffer share with one of the solver buffer. See also comments in MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0(). */ if (fs->spsvBufferSize_L > fs->spsvBufferSize_Lt) { PetscCallCUDA(cudaMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_L, (size_t)fs->factBufferSize_M))); fs->spsvBuffer_L = fs->factBuffer_M; PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_Lt, fs->spsvBufferSize_Lt)); } else { PetscCallCUDA(cudaMalloc((void **)&fs->factBuffer_M, PetscMax(fs->spsvBufferSize_Lt, (size_t)fs->factBufferSize_M))); fs->spsvBuffer_Lt = fs->factBuffer_M; PetscCallCUDA(cudaMalloc((void **)&fs->spsvBuffer_L, fs->spsvBufferSize_L)); } /* ========================================================================== */ /* Perform analysis of ic0 on M */ /* The lower triangular part of M has the same sparsity pattern as L */ /* ========================================================================== */ int structural_zero; cusparseStatus_t status; fs->policy_M = CUSPARSE_SOLVE_POLICY_USE_LEVEL; if (m) PetscCallCUSPARSE(cusparseXcsric02_analysis(fs->handle, m, nz, fs->matDescr_M, fs->csrVal, fs->csrRowPtr, fs->csrColIdx, fs->ic0Info_M, fs->policy_M, fs->factBuffer_M)); if (PetscDefined(USE_DEBUG)) { /* Function cusparseXcsric02_zeroPivot() is a blocking call. It calls cudaDeviceSynchronize() to make sure all previous kernels are done. */ status = cusparseXcsric02_zeroPivot(fs->handle, fs->ic0Info_M, &structural_zero); PetscCheck(CUSPARSE_STATUS_ZERO_PIVOT != status, PETSC_COMM_SELF, PETSC_ERR_USER_INPUT, "Structural zero pivot detected in csric02: A(%d,%d) is missing", structural_zero, structural_zero); } /* Estimate FLOPs of the numeric factorization */ { Mat_SeqAIJ *Aseq = (Mat_SeqAIJ *)A->data; PetscInt *Ai, nzRow, nzLeft; PetscLogDouble flops = 0.0; Ai = Aseq->i; for (PetscInt i = 0; i < m; i++) { nzRow = Ai[i + 1] - Ai[i]; if (nzRow > 1) { /* We want to eliminate nonzeros left to the diagonal one by one. Assume each time, nonzeros right and include the eliminated one will be updated, which incurs a multiplication and an addition. 
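     Because SeqAIJ stores the full symmetric nonzero pattern, roughly half of the off-diagonal entries of a
     row lie to the left of the diagonal, which is where the (nzRow - 1) / 2 estimate below comes from. As an
     illustration with a made-up row, nzRow = 7 gives nzLeft = 3 and 3 * (2 * 7 - 3 + 1) = 36 flops.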
        */
        nzLeft = (nzRow - 1) / 2;
        flops += nzLeft * (2.0 * nzRow - nzLeft + 1);
      }
    }
    fs->numericFactFlops = flops;
  }
  fact->ops->choleskyfactornumeric = MatICCFactorNumeric_SeqAIJCUSPARSE_ICC0;
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif

static PetscErrorCode MatILUFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS isrow, IS iscol, const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;

  PetscFunctionBegin;
#if CUSPARSE_VERSION >= 11500
  PetscBool row_identity = PETSC_FALSE, col_identity = PETSC_FALSE;
  if (cusparseTriFactors->factorizeOnDevice) {
    PetscCall(ISIdentity(isrow, &row_identity));
    PetscCall(ISIdentity(iscol, &col_identity));
  }
  if (!info->levels && row_identity && col_identity) {
    PetscCall(MatILUFactorSymbolic_SeqAIJCUSPARSE_ILU0(B, A, isrow, iscol, info));
  } else
#endif
  {
    PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
    PetscCall(MatILUFactorSymbolic_SeqAIJ(B, A, isrow, iscol, info));
    B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatLUFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS isrow, IS iscol, const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;

  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
  PetscCall(MatLUFactorSymbolic_SeqAIJ(B, A, isrow, iscol, info));
  B->ops->lufactornumeric = MatLUFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatICCFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS perm, const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;

  PetscFunctionBegin;
#if CUSPARSE_VERSION >= 11500
  PetscBool perm_identity = PETSC_FALSE;
  if (cusparseTriFactors->factorizeOnDevice) PetscCall(ISIdentity(perm, &perm_identity));
  if (!info->levels && perm_identity) {
    PetscCall(MatICCFactorSymbolic_SeqAIJCUSPARSE_ICC0(B, A, perm, info));
  } else
#endif
  {
    PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
    PetscCall(MatICCFactorSymbolic_SeqAIJ(B, A, perm, info));
    B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatCholeskyFactorSymbolic_SeqAIJCUSPARSE(Mat B, Mat A, IS perm, const MatFactorInfo *info)
{
  Mat_SeqAIJCUSPARSETriFactors *cusparseTriFactors = (Mat_SeqAIJCUSPARSETriFactors *)B->spptr;

  PetscFunctionBegin;
  PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(&cusparseTriFactors));
  PetscCall(MatCholeskyFactorSymbolic_SeqAIJ(B, A, perm, info));
  B->ops->choleskyfactornumeric = MatCholeskyFactorNumeric_SeqAIJCUSPARSE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PetscErrorCode MatFactorGetSolverType_seqaij_cusparse(Mat, MatSolverType *type)
{
  PetscFunctionBegin;
  *type = MATSOLVERCUSPARSE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
  MATSOLVERCUSPARSE = "cusparse" - A matrix solver type providing triangular solvers for sequential matrices
  of type `MATSEQAIJCUSPARSE` on a single GPU. Currently supported algorithms are ILU(k) and ICC(k). Typically,
  deeper factorizations (larger k) result in poorer performance in the triangular solves. Full LU and Cholesky
  decompositions can be solved through the cuSPARSE triangular solve algorithm. However, the performance can be
  quite poor and thus these algorithms are not recommended. This class does NOT support direct solver operations.
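
  For example (a sketch; the exact options depend on how the application configures its solvers), an ILU(0)
  preconditioner using this solver can be requested from the options database with
    -mat_type aijcusparse -pc_type ilu -pc_factor_mat_solver_type cusparse
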
Level: beginner .seealso: [](chapter_matrices), `Mat`, `MATSEQAIJCUSPARSE`, `PCFactorSetMatSolverType()`, `MatSolverType`, `MatCreateSeqAIJCUSPARSE()`, `MATAIJCUSPARSE`, `MatCreateAIJCUSPARSE()`, `MatCUSPARSESetFormat()`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation` M*/ PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse(Mat A, MatFactorType ftype, Mat *B) { PetscInt n = A->rmap->n; PetscBool factOnDevice, factOnHost; char *prefix; char factPlace[32] = "device"; /* the default */ PetscFunctionBegin; PetscCall(MatCreate(PetscObjectComm((PetscObject)A), B)); PetscCall(MatSetSizes(*B, n, n, n, n)); (*B)->factortype = ftype; PetscCall(MatSetType(*B, MATSEQAIJCUSPARSE)); prefix = (*B)->factorprefix ? (*B)->factorprefix : ((PetscObject)A)->prefix; PetscOptionsBegin(PetscObjectComm((PetscObject)(*B)), prefix, "MatGetFactor", "Mat"); PetscCall(PetscOptionsString("-mat_factor_bind_factorization", "Do matrix factorization on host or device when possible", "MatGetFactor", NULL, factPlace, sizeof(factPlace), NULL)); PetscOptionsEnd(); PetscCall(PetscStrcasecmp("device", factPlace, &factOnDevice)); PetscCall(PetscStrcasecmp("host", factPlace, &factOnHost)); PetscCheck(factOnDevice || factOnHost, PetscObjectComm((PetscObject)(*B)), PETSC_ERR_ARG_OUTOFRANGE, "Wrong option %s to -mat_factor_bind_factorization <string>. Only host and device are allowed", factPlace); ((Mat_SeqAIJCUSPARSETriFactors *)(*B)->spptr)->factorizeOnDevice = factOnDevice; if (A->boundtocpu && A->bindingpropagates) PetscCall(MatBindToCPU(*B, PETSC_TRUE)); if (ftype == MAT_FACTOR_LU || ftype == MAT_FACTOR_ILU || ftype == MAT_FACTOR_ILUDT) { PetscCall(MatSetBlockSizesFromMats(*B, A, A)); if (!A->boundtocpu) { (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJCUSPARSE; } else { (*B)->ops->ilufactorsymbolic = MatILUFactorSymbolic_SeqAIJ; (*B)->ops->lufactorsymbolic = MatLUFactorSymbolic_SeqAIJ; } PetscCall(PetscStrallocpy(MATORDERINGND, (char **)&(*B)->preferredordering[MAT_FACTOR_LU])); PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ILU])); PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ILUDT])); } else if (ftype == MAT_FACTOR_CHOLESKY || ftype == MAT_FACTOR_ICC) { if (!A->boundtocpu) { (*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJCUSPARSE; (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJCUSPARSE; } else { (*B)->ops->iccfactorsymbolic = MatICCFactorSymbolic_SeqAIJ; (*B)->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_SeqAIJ; } PetscCall(PetscStrallocpy(MATORDERINGND, (char **)&(*B)->preferredordering[MAT_FACTOR_CHOLESKY])); PetscCall(PetscStrallocpy(MATORDERINGNATURAL, (char **)&(*B)->preferredordering[MAT_FACTOR_ICC])); } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "Factor type not supported for CUSPARSE Matrix Types"); PetscCall(MatSeqAIJSetPreallocation(*B, MAT_SKIP_ALLOCATION, NULL)); (*B)->canuseordering = PETSC_TRUE; PetscCall(PetscObjectComposeFunction((PetscObject)(*B), "MatFactorGetSolverType_C", MatFactorGetSolverType_seqaij_cusparse)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSECopyFromGPU(Mat A) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; #if CUSPARSE_VERSION >= 13500 Mat_SeqAIJCUSPARSETriFactors *fs = (Mat_SeqAIJCUSPARSETriFactors *)A->spptr; #endif PetscFunctionBegin; if (A->offloadmask == PETSC_OFFLOAD_GPU) 
{ PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyFromGPU, A, 0, 0, 0)); if (A->factortype == MAT_FACTOR_NONE) { CsrMatrix *matrix = (CsrMatrix *)cusp->mat->mat; PetscCallCUDA(cudaMemcpy(a->a, matrix->values->data().get(), a->nz * sizeof(PetscScalar), cudaMemcpyDeviceToHost)); } #if CUSPARSE_VERSION >= 13500 else if (fs->csrVal) { /* We have a factorized matrix on device and are able to copy it to host */ PetscCallCUDA(cudaMemcpy(a->a, fs->csrVal, a->nz * sizeof(PetscScalar), cudaMemcpyDeviceToHost)); } #endif else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "No support for copying this type of factorized matrix from device to host"); PetscCall(PetscLogGpuToCpu(a->nz * sizeof(PetscScalar))); PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyFromGPU, A, 0, 0, 0)); A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJGetArray_SeqAIJCUSPARSE(Mat A, PetscScalar *array[]) { PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A)); *array = ((Mat_SeqAIJ *)A->data)->a; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJRestoreArray_SeqAIJCUSPARSE(Mat A, PetscScalar *array[]) { PetscFunctionBegin; A->offloadmask = PETSC_OFFLOAD_CPU; *array = NULL; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJGetArrayRead_SeqAIJCUSPARSE(Mat A, const PetscScalar *array[]) { PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A)); *array = ((Mat_SeqAIJ *)A->data)->a; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJRestoreArrayRead_SeqAIJCUSPARSE(Mat, const PetscScalar *array[]) { PetscFunctionBegin; *array = NULL; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJGetArrayWrite_SeqAIJCUSPARSE(Mat A, PetscScalar *array[]) { PetscFunctionBegin; *array = ((Mat_SeqAIJ *)A->data)->a; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJRestoreArrayWrite_SeqAIJCUSPARSE(Mat A, PetscScalar *array[]) { PetscFunctionBegin; A->offloadmask = PETSC_OFFLOAD_CPU; *array = NULL; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJGetCSRAndMemType_SeqAIJCUSPARSE(Mat A, const PetscInt **i, const PetscInt **j, PetscScalar **a, PetscMemType *mtype) { Mat_SeqAIJCUSPARSE *cusp; CsrMatrix *matrix; PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCheck(A->factortype == MAT_FACTOR_NONE, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONGSTATE, "Not for factored matrix"); cusp = static_cast<Mat_SeqAIJCUSPARSE *>(A->spptr); PetscCheck(cusp != NULL, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONGSTATE, "cusp is NULL"); matrix = (CsrMatrix *)cusp->mat->mat; if (i) { #if !defined(PETSC_USE_64BIT_INDICES) *i = matrix->row_offsets->data().get(); #else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSparse does not supported 64-bit indices"); #endif } if (j) { #if !defined(PETSC_USE_64BIT_INDICES) *j = matrix->column_indices->data().get(); #else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSparse does not supported 64-bit indices"); #endif } if (a) *a = matrix->values->data().get(); if (mtype) *mtype = PETSC_MEMTYPE_CUDA; PetscFunctionReturn(PETSC_SUCCESS); } PETSC_INTERN PetscErrorCode MatSeqAIJCUSPARSECopyToGPU(Mat A) { Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct = cusparsestruct->mat; Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscInt m = A->rmap->n, *ii, *ridx, tmp; cusparseStatus_t stat; PetscBool both = PETSC_TRUE; PetscFunctionBegin; PetscCheck(!A->boundtocpu, PETSC_COMM_SELF, PETSC_ERR_GPU, "Cannot 
copy to GPU"); if (A->offloadmask == PETSC_OFFLOAD_UNALLOCATED || A->offloadmask == PETSC_OFFLOAD_CPU) { if (A->nonzerostate == cusparsestruct->nonzerostate && cusparsestruct->format == MAT_CUSPARSE_CSR) { /* Copy values only */ CsrMatrix *matrix; matrix = (CsrMatrix *)cusparsestruct->mat->mat; PetscCheck(!a->nz || a->a, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR values"); PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyToGPU, A, 0, 0, 0)); matrix->values->assign(a->a, a->a + a->nz); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogCpuToGpu((a->nz) * sizeof(PetscScalar))); PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyToGPU, A, 0, 0, 0)); PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE)); } else { PetscInt nnz; PetscCall(PetscLogEventBegin(MAT_CUSPARSECopyToGPU, A, 0, 0, 0)); PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&cusparsestruct->mat, cusparsestruct->format)); PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE)); delete cusparsestruct->workVector; delete cusparsestruct->rowoffsets_gpu; cusparsestruct->workVector = NULL; cusparsestruct->rowoffsets_gpu = NULL; try { if (a->compressedrow.use) { m = a->compressedrow.nrows; ii = a->compressedrow.i; ridx = a->compressedrow.rindex; } else { m = A->rmap->n; ii = a->i; ridx = NULL; } PetscCheck(ii, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR row data"); if (!a->a) { nnz = ii[m]; both = PETSC_FALSE; } else nnz = a->nz; PetscCheck(!nnz || a->j, PETSC_COMM_SELF, PETSC_ERR_GPU, "Missing CSR column data"); /* create cusparse matrix */ cusparsestruct->nrows = m; matstruct = new Mat_SeqAIJCUSPARSEMultStruct; PetscCallCUSPARSE(cusparseCreateMatDescr(&matstruct->descr)); PetscCallCUSPARSE(cusparseSetMatIndexBase(matstruct->descr, CUSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(cusparseSetMatType(matstruct->descr, CUSPARSE_MATRIX_TYPE_GENERAL)); PetscCallCUDA(cudaMalloc((void **)&(matstruct->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(matstruct->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(matstruct->beta_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMemcpy(matstruct->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(matstruct->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(matstruct->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUSPARSE(cusparseSetPointerMode(cusparsestruct->handle, CUSPARSE_POINTER_MODE_DEVICE)); /* Build a hybrid/ellpack matrix if this option is chosen for the storage */ if (cusparsestruct->format == MAT_CUSPARSE_CSR) { /* set the matrix */ CsrMatrix *mat = new CsrMatrix; mat->num_rows = m; mat->num_cols = A->cmap->n; mat->num_entries = nnz; mat->row_offsets = new THRUSTINTARRAY32(m + 1); mat->row_offsets->assign(ii, ii + m + 1); mat->column_indices = new THRUSTINTARRAY32(nnz); mat->column_indices->assign(a->j, a->j + nnz); mat->values = new THRUSTARRAY(nnz); if (a->a) mat->values->assign(a->a, a->a + nnz); /* assign the pointer */ matstruct->mat = mat; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) if (mat->num_rows) { /* cusparse errors on empty matrices! 
*/ stat = cusparseCreateCsr(&matstruct->matDescr, mat->num_rows, mat->num_cols, mat->num_entries, mat->row_offsets->data().get(), mat->column_indices->data().get(), mat->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */ CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); } #endif } else if (cusparsestruct->format == MAT_CUSPARSE_ELL || cusparsestruct->format == MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else CsrMatrix *mat = new CsrMatrix; mat->num_rows = m; mat->num_cols = A->cmap->n; mat->num_entries = nnz; mat->row_offsets = new THRUSTINTARRAY32(m + 1); mat->row_offsets->assign(ii, ii + m + 1); mat->column_indices = new THRUSTINTARRAY32(nnz); mat->column_indices->assign(a->j, a->j + nnz); mat->values = new THRUSTARRAY(nnz); if (a->a) mat->values->assign(a->a, a->a + nnz); cusparseHybMat_t hybMat; PetscCallCUSPARSE(cusparseCreateHybMat(&hybMat)); cusparseHybPartition_t partition = cusparsestruct->format == MAT_CUSPARSE_ELL ? CUSPARSE_HYB_PARTITION_MAX : CUSPARSE_HYB_PARTITION_AUTO; stat = cusparse_csr2hyb(cusparsestruct->handle, mat->num_rows, mat->num_cols, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), hybMat, 0, partition); PetscCallCUSPARSE(stat); /* assign the pointer */ matstruct->mat = hybMat; if (mat) { if (mat->values) delete (THRUSTARRAY *)mat->values; if (mat->column_indices) delete (THRUSTINTARRAY32 *)mat->column_indices; if (mat->row_offsets) delete (THRUSTINTARRAY32 *)mat->row_offsets; delete (CsrMatrix *)mat; } #endif } /* assign the compressed row indices */ if (a->compressedrow.use) { cusparsestruct->workVector = new THRUSTARRAY(m); matstruct->cprowIndices = new THRUSTINTARRAY(m); matstruct->cprowIndices->assign(ridx, ridx + m); tmp = m; } else { cusparsestruct->workVector = NULL; matstruct->cprowIndices = NULL; tmp = 0; } PetscCall(PetscLogCpuToGpu(((m + 1) + (a->nz)) * sizeof(int) + tmp * sizeof(PetscInt) + (3 + (a->nz)) * sizeof(PetscScalar))); /* assign the pointer */ cusparsestruct->mat = matstruct; } catch (char *ex) { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex); } PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogEventEnd(MAT_CUSPARSECopyToGPU, A, 0, 0, 0)); cusparsestruct->nonzerostate = A->nonzerostate; } if (both) A->offloadmask = PETSC_OFFLOAD_BOTH; } PetscFunctionReturn(PETSC_SUCCESS); } struct VecCUDAPlusEquals { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<1>(t) = thrust::get<1>(t) + thrust::get<0>(t); } }; struct VecCUDAEquals { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<1>(t) = thrust::get<0>(t); } }; struct VecCUDAEqualsReverse { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t); } }; struct MatMatCusparse { PetscBool cisdense; PetscScalar *Bt; Mat X; PetscBool reusesym; /* Cusparse does not have split symbolic and numeric phases for sparse matmat operations */ PetscLogDouble flops; CsrMatrix *Bcsr; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) cusparseSpMatDescr_t matSpBDescr; PetscBool initialized; /* C = alpha op(A) op(B) + beta C */ cusparseDnMatDescr_t matBDescr; cusparseDnMatDescr_t matCDescr; PetscInt Blda, Clda; /* Record leading dimensions of B and C here to detect changes*/ #if PETSC_PKG_CUDA_VERSION_GE(11, 
4, 0) void *dBuffer4; void *dBuffer5; #endif size_t mmBufferSize; void *mmBuffer; void *mmBuffer2; /* SpGEMM WorkEstimation buffer */ cusparseSpGEMMDescr_t spgemmDesc; #endif }; static PetscErrorCode MatDestroy_MatMatCusparse(void *data) { MatMatCusparse *mmdata = (MatMatCusparse *)data; PetscFunctionBegin; PetscCallCUDA(cudaFree(mmdata->Bt)); delete mmdata->Bcsr; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) if (mmdata->matSpBDescr) PetscCallCUSPARSE(cusparseDestroySpMat(mmdata->matSpBDescr)); if (mmdata->matBDescr) PetscCallCUSPARSE(cusparseDestroyDnMat(mmdata->matBDescr)); if (mmdata->matCDescr) PetscCallCUSPARSE(cusparseDestroyDnMat(mmdata->matCDescr)); if (mmdata->spgemmDesc) PetscCallCUSPARSE(cusparseSpGEMM_destroyDescr(mmdata->spgemmDesc)); #if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) if (mmdata->dBuffer4) PetscCallCUDA(cudaFree(mmdata->dBuffer4)); if (mmdata->dBuffer5) PetscCallCUDA(cudaFree(mmdata->dBuffer5)); #endif if (mmdata->mmBuffer) PetscCallCUDA(cudaFree(mmdata->mmBuffer)); if (mmdata->mmBuffer2) PetscCallCUDA(cudaFree(mmdata->mmBuffer2)); #endif PetscCall(MatDestroy(&mmdata->X)); PetscCall(PetscFree(data)); PetscFunctionReturn(PETSC_SUCCESS); } #include <../src/mat/impls/dense/seq/dense.h> // MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal() static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C) { Mat_Product *product = C->product; Mat A, B; PetscInt m, n, blda, clda; PetscBool flg, biscuda; Mat_SeqAIJCUSPARSE *cusp; cusparseStatus_t stat; cusparseOperation_t opA; const PetscScalar *barray; PetscScalar *carray; MatMatCusparse *mmdata; Mat_SeqAIJCUSPARSEMultStruct *mat; CsrMatrix *csrmat; PetscFunctionBegin; MatCheckProduct(C, 1); PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data empty"); mmdata = (MatMatCusparse *)product->data; A = product->A; B = product->B; PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name); /* currently CopyToGpu does not copy if the matrix is bound to CPU Instead of silently accepting the wrong answer, I prefer to raise the error */ PetscCheck(!A->boundtocpu, PetscObjectComm((PetscObject)A), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases"); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; switch (product->type) { case MATPRODUCT_AB: case MATPRODUCT_PtAP: mat = cusp->mat; opA = CUSPARSE_OPERATION_NON_TRANSPOSE; m = A->rmap->n; n = B->cmap->n; break; case MATPRODUCT_AtB: if (!A->form_explicit_transpose) { mat = cusp->mat; opA = CUSPARSE_OPERATION_TRANSPOSE; } else { PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A)); mat = cusp->matTranspose; opA = CUSPARSE_OPERATION_NON_TRANSPOSE; } m = A->cmap->n; n = B->cmap->n; break; case MATPRODUCT_ABt: case MATPRODUCT_RARt: mat = cusp->mat; opA = CUSPARSE_OPERATION_NON_TRANSPOSE; m = A->rmap->n; n = B->rmap->n; break; default: SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]); } PetscCheck(mat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing Mat_SeqAIJCUSPARSEMultStruct"); csrmat = (CsrMatrix *)mat->mat; /* if the user passed a CPU matrix, copy the data to the GPU */ PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQDENSECUDA, &biscuda)); if (!biscuda) PetscCall(MatConvert(B, MATSEQDENSECUDA, MAT_INPLACE_MATRIX, &B)); 
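  /* What follows forms op(A) * op(B) into the dense array of C (or of the intermediate mmdata->X for
     MATPRODUCT_PtAP and MATPRODUCT_RARt), with the dense matrices viewed as column-major with leading dimensions
     blda and clda. On CUDA >= 11 the dense descriptors are (re)built whenever the leading dimensions change and
     cusparseSpMM() is called; on older CUDA, B is explicitly transposed with cublasXgeam() when op(B) is a
     transpose and cusparse_csr_spmm() is used. For PtAP/RARt the intermediate X is afterwards multiplied with B
     by MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal(). */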
PetscCall(MatDenseGetArrayReadAndMemType(B, &barray, nullptr)); PetscCall(MatDenseGetLDA(B, &blda)); if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) { PetscCall(MatDenseGetArrayWriteAndMemType(mmdata->X, &carray, nullptr)); PetscCall(MatDenseGetLDA(mmdata->X, &clda)); } else { PetscCall(MatDenseGetArrayWriteAndMemType(C, &carray, nullptr)); PetscCall(MatDenseGetLDA(C, &clda)); } PetscCall(PetscLogGpuTimeBegin()); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) cusparseOperation_t opB = (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) ? CUSPARSE_OPERATION_TRANSPOSE : CUSPARSE_OPERATION_NON_TRANSPOSE; /* (re)allocate mmBuffer if not initialized or LDAs are different */ if (!mmdata->initialized || mmdata->Blda != blda || mmdata->Clda != clda) { size_t mmBufferSize; if (mmdata->initialized && mmdata->Blda != blda) { PetscCallCUSPARSE(cusparseDestroyDnMat(mmdata->matBDescr)); mmdata->matBDescr = NULL; } if (!mmdata->matBDescr) { PetscCallCUSPARSE(cusparseCreateDnMat(&mmdata->matBDescr, B->rmap->n, B->cmap->n, blda, (void *)barray, cusparse_scalartype, CUSPARSE_ORDER_COL)); mmdata->Blda = blda; } if (mmdata->initialized && mmdata->Clda != clda) { PetscCallCUSPARSE(cusparseDestroyDnMat(mmdata->matCDescr)); mmdata->matCDescr = NULL; } if (!mmdata->matCDescr) { /* matCDescr is for C or mmdata->X */ PetscCallCUSPARSE(cusparseCreateDnMat(&mmdata->matCDescr, m, n, clda, (void *)carray, cusparse_scalartype, CUSPARSE_ORDER_COL)); mmdata->Clda = clda; } if (!mat->matDescr) { stat = cusparseCreateCsr(&mat->matDescr, csrmat->num_rows, csrmat->num_cols, csrmat->num_entries, csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(), csrmat->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, /* row offset, col idx types due to THRUSTINTARRAY32 */ CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); } stat = cusparseSpMM_bufferSize(cusp->handle, opA, opB, mat->alpha_one, mat->matDescr, mmdata->matBDescr, mat->beta_zero, mmdata->matCDescr, cusparse_scalartype, cusp->spmmAlg, &mmBufferSize); PetscCallCUSPARSE(stat); if ((mmdata->mmBuffer && mmdata->mmBufferSize < mmBufferSize) || !mmdata->mmBuffer) { PetscCallCUDA(cudaFree(mmdata->mmBuffer)); PetscCallCUDA(cudaMalloc(&mmdata->mmBuffer, mmBufferSize)); mmdata->mmBufferSize = mmBufferSize; } mmdata->initialized = PETSC_TRUE; } else { /* to be safe, always update pointers of the mats */ PetscCallCUSPARSE(cusparseSpMatSetValues(mat->matDescr, csrmat->values->data().get())); PetscCallCUSPARSE(cusparseDnMatSetValues(mmdata->matBDescr, (void *)barray)); PetscCallCUSPARSE(cusparseDnMatSetValues(mmdata->matCDescr, (void *)carray)); } /* do cusparseSpMM, which supports transpose on B */ stat = cusparseSpMM(cusp->handle, opA, opB, mat->alpha_one, mat->matDescr, mmdata->matBDescr, mat->beta_zero, mmdata->matCDescr, cusparse_scalartype, cusp->spmmAlg, mmdata->mmBuffer); PetscCallCUSPARSE(stat); #else PetscInt k; /* cusparseXcsrmm does not support transpose on B */ if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) { cublasHandle_t cublasv2handle; cublasStatus_t cerr; PetscCall(PetscCUBLASGetHandle(&cublasv2handle)); cerr = cublasXgeam(cublasv2handle, CUBLAS_OP_T, CUBLAS_OP_T, B->cmap->n, B->rmap->n, &PETSC_CUSPARSE_ONE, barray, blda, &PETSC_CUSPARSE_ZERO, barray, blda, mmdata->Bt, B->cmap->n); PetscCallCUBLAS(cerr); blda = B->cmap->n; k = B->cmap->n; } else { k = B->rmap->n; } /* perform the MatMat operation, op(A) is m x k, op(B) is k x n */ stat = 
cusparse_csr_spmm(cusp->handle, opA, m, n, k, csrmat->num_entries, mat->alpha_one, mat->descr, csrmat->values->data().get(), csrmat->row_offsets->data().get(), csrmat->column_indices->data().get(), mmdata->Bt ? mmdata->Bt : barray, blda, mat->beta_zero, carray, clda); PetscCallCUSPARSE(stat); #endif PetscCall(PetscLogGpuTimeEnd()); PetscCall(PetscLogGpuFlops(n * 2.0 * csrmat->num_entries)); PetscCall(MatDenseRestoreArrayReadAndMemType(B, &barray)); if (product->type == MATPRODUCT_RARt) { PetscCall(MatDenseRestoreArrayWriteAndMemType(mmdata->X, &carray)); PetscCall(MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal(B, mmdata->X, C, PETSC_FALSE, PETSC_FALSE)); } else if (product->type == MATPRODUCT_PtAP) { PetscCall(MatDenseRestoreArrayWriteAndMemType(mmdata->X, &carray)); PetscCall(MatMatMultNumeric_SeqDenseCUDA_SeqDenseCUDA_Internal(B, mmdata->X, C, PETSC_TRUE, PETSC_FALSE)); } else { PetscCall(MatDenseRestoreArrayWriteAndMemType(C, &carray)); } if (mmdata->cisdense) PetscCall(MatConvert(C, MATSEQDENSE, MAT_INPLACE_MATRIX, &C)); if (!biscuda) PetscCall(MatConvert(B, MATSEQDENSE, MAT_INPLACE_MATRIX, &B)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA(Mat C) { Mat_Product *product = C->product; Mat A, B; PetscInt m, n; PetscBool cisdense, flg; MatMatCusparse *mmdata; Mat_SeqAIJCUSPARSE *cusp; PetscFunctionBegin; MatCheckProduct(C, 1); PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data not empty"); A = product->A; B = product->B; PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name); cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscCheck(cusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format"); switch (product->type) { case MATPRODUCT_AB: m = A->rmap->n; n = B->cmap->n; break; case MATPRODUCT_AtB: m = A->cmap->n; n = B->cmap->n; break; case MATPRODUCT_ABt: m = A->rmap->n; n = B->rmap->n; break; case MATPRODUCT_PtAP: m = B->cmap->n; n = B->cmap->n; break; case MATPRODUCT_RARt: m = B->rmap->n; n = B->rmap->n; break; default: SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]); } PetscCall(MatSetSizes(C, m, n, m, n)); /* if C is of type MATSEQDENSE (CPU), perform the operation on the GPU and then copy on the CPU */ PetscCall(PetscObjectTypeCompare((PetscObject)C, MATSEQDENSE, &cisdense)); PetscCall(MatSetType(C, MATSEQDENSECUDA)); /* product data */ PetscCall(PetscNew(&mmdata)); mmdata->cisdense = cisdense; #if PETSC_PKG_CUDA_VERSION_LT(11, 0, 0) /* cusparseXcsrmm does not support transpose on B, so we allocate buffer to store B^T */ if (product->type == MATPRODUCT_ABt || product->type == MATPRODUCT_RARt) PetscCallCUDA(cudaMalloc((void **)&mmdata->Bt, (size_t)B->rmap->n * (size_t)B->cmap->n * sizeof(PetscScalar))); #endif /* for these products we need intermediate storage */ if (product->type == MATPRODUCT_RARt || product->type == MATPRODUCT_PtAP) { PetscCall(MatCreate(PetscObjectComm((PetscObject)C), &mmdata->X)); PetscCall(MatSetType(mmdata->X, MATSEQDENSECUDA)); if (product->type == MATPRODUCT_RARt) { /* do not preallocate, since the first call to MatDenseCUDAGetArray will preallocate on the GPU for us */ PetscCall(MatSetSizes(mmdata->X, A->rmap->n, B->rmap->n, A->rmap->n, B->rmap->n)); } else { PetscCall(MatSetSizes(mmdata->X, 
A->rmap->n, B->cmap->n, A->rmap->n, B->cmap->n)); } } C->product->data = mmdata; C->product->destroy = MatDestroy_MatMatCusparse; C->ops->productnumeric = MatProductNumeric_SeqAIJCUSPARSE_SeqDENSECUDA; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C) { Mat_Product *product = C->product; Mat A, B; Mat_SeqAIJCUSPARSE *Acusp, *Bcusp, *Ccusp; Mat_SeqAIJ *c = (Mat_SeqAIJ *)C->data; Mat_SeqAIJCUSPARSEMultStruct *Amat, *Bmat, *Cmat; CsrMatrix *Acsr, *Bcsr, *Ccsr; PetscBool flg; cusparseStatus_t stat; MatProductType ptype; MatMatCusparse *mmdata; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) cusparseSpMatDescr_t BmatSpDescr; #endif cusparseOperation_t opA = CUSPARSE_OPERATION_NON_TRANSPOSE, opB = CUSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */ PetscFunctionBegin; MatCheckProduct(C, 1); PetscCheck(C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data empty"); PetscCall(PetscObjectTypeCompare((PetscObject)C, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for C of type %s", ((PetscObject)C)->type_name); mmdata = (MatMatCusparse *)C->product->data; A = product->A; B = product->B; if (mmdata->reusesym) { /* this happens when api_user is true, meaning that the matrix values have been already computed in the MatProductSymbolic phase */ mmdata->reusesym = PETSC_FALSE; Ccusp = (Mat_SeqAIJCUSPARSE *)C->spptr; PetscCheck(Ccusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format"); Cmat = Ccusp->mat; PetscCheck(Cmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C mult struct for product type %s", MatProductTypes[C->product->type]); Ccsr = (CsrMatrix *)Cmat->mat; PetscCheck(Ccsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C CSR struct"); goto finalize; } if (!c->nz) goto finalize; PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name); PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for B of type %s", ((PetscObject)B)->type_name); PetscCheck(!A->boundtocpu, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases"); PetscCheck(!B->boundtocpu, PetscObjectComm((PetscObject)C), PETSC_ERR_ARG_WRONG, "Cannot bind to CPU a CUSPARSE matrix between MatProductSymbolic and MatProductNumeric phases"); Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr; Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr; Ccusp = (Mat_SeqAIJCUSPARSE *)C->spptr; PetscCheck(Acusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format"); PetscCheck(Bcusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format"); PetscCheck(Ccusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format"); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(B)); ptype = product->type; if (A->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_AtB) { ptype = MATPRODUCT_AB; PetscCheck(product->symbolic_used_the_fact_A_is_symmetric, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Symbolic should have been built using the fact 
that A is symmetric"); } if (B->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_ABt) { ptype = MATPRODUCT_AB; PetscCheck(product->symbolic_used_the_fact_B_is_symmetric, PetscObjectComm((PetscObject)C), PETSC_ERR_PLIB, "Symbolic should have been built using the fact that B is symmetric"); } switch (ptype) { case MATPRODUCT_AB: Amat = Acusp->mat; Bmat = Bcusp->mat; break; case MATPRODUCT_AtB: Amat = Acusp->matTranspose; Bmat = Bcusp->mat; break; case MATPRODUCT_ABt: Amat = Acusp->mat; Bmat = Bcusp->matTranspose; break; default: SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]); } Cmat = Ccusp->mat; PetscCheck(Amat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A mult struct for product type %s", MatProductTypes[ptype]); PetscCheck(Bmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B mult struct for product type %s", MatProductTypes[ptype]); PetscCheck(Cmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C mult struct for product type %s", MatProductTypes[ptype]); Acsr = (CsrMatrix *)Amat->mat; Bcsr = mmdata->Bcsr ? mmdata->Bcsr : (CsrMatrix *)Bmat->mat; /* B may be in compressed row storage */ Ccsr = (CsrMatrix *)Cmat->mat; PetscCheck(Acsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A CSR struct"); PetscCheck(Bcsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B CSR struct"); PetscCheck(Ccsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing C CSR struct"); PetscCall(PetscLogGpuTimeBegin()); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) BmatSpDescr = mmdata->Bcsr ? mmdata->matSpBDescr : Bmat->matDescr; /* B may be in compressed row storage */ PetscCallCUSPARSE(cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE)); #if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc); PetscCallCUSPARSE(stat); #else stat = cusparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer); PetscCallCUSPARSE(stat); stat = cusparseSpGEMM_copy(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc); PetscCallCUSPARSE(stat); #endif #else stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get()); PetscCallCUSPARSE(stat); #endif PetscCall(PetscLogGpuFlops(mmdata->flops)); PetscCallCUDA(WaitForCUDA()); PetscCall(PetscLogGpuTimeEnd()); C->offloadmask = PETSC_OFFLOAD_GPU; finalize: /* shorter version of MatAssemblyEnd_SeqAIJ */ PetscCall(PetscInfo(C, "Matrix size: %" PetscInt_FMT " X %" PetscInt_FMT "; storage space: 0 unneeded,%" PetscInt_FMT " used\n", C->rmap->n, C->cmap->n, c->nz)); PetscCall(PetscInfo(C, "Number of mallocs during MatSetValues() is 0\n")); PetscCall(PetscInfo(C, "Maximum nonzeros in any 
row is %" PetscInt_FMT "\n", c->rmax)); c->reallocs = 0; C->info.mallocs += 0; C->info.nz_unneeded = 0; C->assembled = C->was_assembled = PETSC_TRUE; C->num_ass++; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE(Mat C) { Mat_Product *product = C->product; Mat A, B; Mat_SeqAIJCUSPARSE *Acusp, *Bcusp, *Ccusp; Mat_SeqAIJ *a, *b, *c; Mat_SeqAIJCUSPARSEMultStruct *Amat, *Bmat, *Cmat; CsrMatrix *Acsr, *Bcsr, *Ccsr; PetscInt i, j, m, n, k; PetscBool flg; cusparseStatus_t stat; MatProductType ptype; MatMatCusparse *mmdata; PetscLogDouble flops; PetscBool biscompressed, ciscompressed; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) int64_t C_num_rows1, C_num_cols1, C_nnz1; cusparseSpMatDescr_t BmatSpDescr; #else int cnz; #endif cusparseOperation_t opA = CUSPARSE_OPERATION_NON_TRANSPOSE, opB = CUSPARSE_OPERATION_NON_TRANSPOSE; /* cuSPARSE spgemm doesn't support transpose yet */ PetscFunctionBegin; MatCheckProduct(C, 1); PetscCheck(!C->product->data, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Product data not empty"); A = product->A; B = product->B; PetscCall(PetscObjectTypeCompare((PetscObject)A, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for type %s", ((PetscObject)A)->type_name); PetscCall(PetscObjectTypeCompare((PetscObject)B, MATSEQAIJCUSPARSE, &flg)); PetscCheck(flg, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Not for B of type %s", ((PetscObject)B)->type_name); a = (Mat_SeqAIJ *)A->data; b = (Mat_SeqAIJ *)B->data; /* product data */ PetscCall(PetscNew(&mmdata)); C->product->data = mmdata; C->product->destroy = MatDestroy_MatMatCusparse; PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(B)); Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr; /* Access spptr after MatSeqAIJCUSPARSECopyToGPU, not before */ Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr; PetscCheck(Acusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format"); PetscCheck(Bcusp->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Only for MAT_CUSPARSE_CSR format"); ptype = product->type; if (A->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_AtB) { ptype = MATPRODUCT_AB; product->symbolic_used_the_fact_A_is_symmetric = PETSC_TRUE; } if (B->symmetric == PETSC_BOOL3_TRUE && ptype == MATPRODUCT_ABt) { ptype = MATPRODUCT_AB; product->symbolic_used_the_fact_B_is_symmetric = PETSC_TRUE; } biscompressed = PETSC_FALSE; ciscompressed = PETSC_FALSE; switch (ptype) { case MATPRODUCT_AB: m = A->rmap->n; n = B->cmap->n; k = A->cmap->n; Amat = Acusp->mat; Bmat = Bcusp->mat; if (a->compressedrow.use) ciscompressed = PETSC_TRUE; if (b->compressedrow.use) biscompressed = PETSC_TRUE; break; case MATPRODUCT_AtB: m = A->cmap->n; n = B->cmap->n; k = A->rmap->n; PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A)); Amat = Acusp->matTranspose; Bmat = Bcusp->mat; if (b->compressedrow.use) biscompressed = PETSC_TRUE; break; case MATPRODUCT_ABt: m = A->rmap->n; n = B->rmap->n; k = A->cmap->n; PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(B)); Amat = Acusp->mat; Bmat = Bcusp->matTranspose; if (a->compressedrow.use) ciscompressed = PETSC_TRUE; break; default: SETERRQ(PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Unsupported product type %s", MatProductTypes[product->type]); } /* create cusparse matrix */ PetscCall(MatSetSizes(C, m, n, m, n)); PetscCall(MatSetType(C, MATSEQAIJCUSPARSE)); c = (Mat_SeqAIJ *)C->data; Ccusp = (Mat_SeqAIJCUSPARSE 
*)C->spptr; Cmat = new Mat_SeqAIJCUSPARSEMultStruct; Ccsr = new CsrMatrix; c->compressedrow.use = ciscompressed; if (c->compressedrow.use) { /* if a is in compressed row, than c will be in compressed row format */ c->compressedrow.nrows = a->compressedrow.nrows; PetscCall(PetscMalloc2(c->compressedrow.nrows + 1, &c->compressedrow.i, c->compressedrow.nrows, &c->compressedrow.rindex)); PetscCall(PetscArraycpy(c->compressedrow.rindex, a->compressedrow.rindex, c->compressedrow.nrows)); Ccusp->workVector = new THRUSTARRAY(c->compressedrow.nrows); Cmat->cprowIndices = new THRUSTINTARRAY(c->compressedrow.nrows); Cmat->cprowIndices->assign(c->compressedrow.rindex, c->compressedrow.rindex + c->compressedrow.nrows); } else { c->compressedrow.nrows = 0; c->compressedrow.i = NULL; c->compressedrow.rindex = NULL; Ccusp->workVector = NULL; Cmat->cprowIndices = NULL; } Ccusp->nrows = ciscompressed ? c->compressedrow.nrows : m; Ccusp->mat = Cmat; Ccusp->mat->mat = Ccsr; Ccsr->num_rows = Ccusp->nrows; Ccsr->num_cols = n; Ccsr->row_offsets = new THRUSTINTARRAY32(Ccusp->nrows + 1); PetscCallCUSPARSE(cusparseCreateMatDescr(&Cmat->descr)); PetscCallCUSPARSE(cusparseSetMatIndexBase(Cmat->descr, CUSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(cusparseSetMatType(Cmat->descr, CUSPARSE_MATRIX_TYPE_GENERAL)); PetscCallCUDA(cudaMalloc((void **)&(Cmat->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(Cmat->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMemcpy(Cmat->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(Cmat->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); if (!Ccsr->num_rows || !Ccsr->num_cols || !a->nz || !b->nz) { /* cusparse raise errors in different calls when matrices have zero rows/columns! 
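     In that case we just zero C's row offsets, set c->nz = 0, allocate empty column-index and value arrays,
     and jump to finalizesym instead of calling cuSPARSE.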
*/ thrust::fill(thrust::device, Ccsr->row_offsets->begin(), Ccsr->row_offsets->end(), 0); c->nz = 0; Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); Ccsr->values = new THRUSTARRAY(c->nz); goto finalizesym; } PetscCheck(Amat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A mult struct for product type %s", MatProductTypes[ptype]); PetscCheck(Bmat, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B mult struct for product type %s", MatProductTypes[ptype]); Acsr = (CsrMatrix *)Amat->mat; if (!biscompressed) { Bcsr = (CsrMatrix *)Bmat->mat; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) BmatSpDescr = Bmat->matDescr; #endif } else { /* we need to use row offsets for the full matrix */ CsrMatrix *cBcsr = (CsrMatrix *)Bmat->mat; Bcsr = new CsrMatrix; Bcsr->num_rows = B->rmap->n; Bcsr->num_cols = cBcsr->num_cols; Bcsr->num_entries = cBcsr->num_entries; Bcsr->column_indices = cBcsr->column_indices; Bcsr->values = cBcsr->values; if (!Bcusp->rowoffsets_gpu) { Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1); Bcusp->rowoffsets_gpu->assign(b->i, b->i + B->rmap->n + 1); PetscCall(PetscLogCpuToGpu((B->rmap->n + 1) * sizeof(PetscInt))); } Bcsr->row_offsets = Bcusp->rowoffsets_gpu; mmdata->Bcsr = Bcsr; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) if (Bcsr->num_rows && Bcsr->num_cols) { stat = cusparseCreateCsr(&mmdata->matSpBDescr, Bcsr->num_rows, Bcsr->num_cols, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Bcsr->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); } BmatSpDescr = mmdata->matSpBDescr; #endif } PetscCheck(Acsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing A CSR struct"); PetscCheck(Bcsr, PetscObjectComm((PetscObject)C), PETSC_ERR_GPU, "Missing B CSR struct"); /* precompute flops count */ if (ptype == MATPRODUCT_AB) { for (i = 0, flops = 0; i < A->rmap->n; i++) { const PetscInt st = a->i[i]; const PetscInt en = a->i[i + 1]; for (j = st; j < en; j++) { const PetscInt brow = a->j[j]; flops += 2. * (b->i[brow + 1] - b->i[brow]); } } } else if (ptype == MATPRODUCT_AtB) { for (i = 0, flops = 0; i < A->rmap->n; i++) { const PetscInt anzi = a->i[i + 1] - a->i[i]; const PetscInt bnzi = b->i[i + 1] - b->i[i]; flops += (2. * anzi) * bnzi; } } else { /* TODO */ flops = 0.; } mmdata->flops = flops; PetscCall(PetscLogGpuTimeBegin()); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCallCUSPARSE(cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE)); stat = cusparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, 0, NULL, NULL, NULL, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); PetscCallCUSPARSE(cusparseSpGEMM_createDescr(&mmdata->spgemmDesc)); #if PETSC_PKG_CUDA_VERSION_GE(11, 4, 0) { /* cusparseSpGEMMreuse has more reasonable APIs than cusparseSpGEMM, so we prefer to use it. 
We follow the sample code at https://github.com/NVIDIA/CUDALibrarySamples/blob/master/cuSPARSE/spgemm_reuse */ void *dBuffer1 = NULL; void *dBuffer2 = NULL; void *dBuffer3 = NULL; /* dBuffer4, dBuffer5 are needed by cusparseSpGEMMreuse_compute, and therefore are stored in mmdata */ size_t bufferSize1 = 0; size_t bufferSize2 = 0; size_t bufferSize3 = 0; size_t bufferSize4 = 0; size_t bufferSize5 = 0; /* ask bufferSize1 bytes for external memory */ stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize1, NULL); PetscCallCUSPARSE(stat); PetscCallCUDA(cudaMalloc((void **)&dBuffer1, bufferSize1)); /* inspect the matrices A and B to understand the memory requirement for the next step */ stat = cusparseSpGEMMreuse_workEstimation(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize1, dBuffer1); PetscCallCUSPARSE(stat); stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize2, NULL, &bufferSize3, NULL, &bufferSize4, NULL); PetscCallCUSPARSE(stat); PetscCallCUDA(cudaMalloc((void **)&dBuffer2, bufferSize2)); PetscCallCUDA(cudaMalloc((void **)&dBuffer3, bufferSize3)); PetscCallCUDA(cudaMalloc((void **)&mmdata->dBuffer4, bufferSize4)); stat = cusparseSpGEMMreuse_nnz(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize2, dBuffer2, &bufferSize3, dBuffer3, &bufferSize4, mmdata->dBuffer4); PetscCallCUSPARSE(stat); PetscCallCUDA(cudaFree(dBuffer1)); PetscCallCUDA(cudaFree(dBuffer2)); /* get matrix C non-zero entries C_nnz1 */ PetscCallCUSPARSE(cusparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1)); c->nz = (PetscInt)C_nnz1; /* allocate matrix C */ Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */ Ccsr->values = new THRUSTARRAY(c->nz); PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */ /* update matC with the new pointers */ stat = cusparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get()); PetscCallCUSPARSE(stat); stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize5, NULL); PetscCallCUSPARSE(stat); PetscCallCUDA(cudaMalloc((void **)&mmdata->dBuffer5, bufferSize5)); stat = cusparseSpGEMMreuse_copy(Ccusp->handle, opA, opB, Amat->matDescr, BmatSpDescr, Cmat->matDescr, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufferSize5, mmdata->dBuffer5); PetscCallCUSPARSE(stat); PetscCallCUDA(cudaFree(dBuffer3)); stat = cusparseSpGEMMreuse_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc); PetscCallCUSPARSE(stat); PetscCall(PetscInfo(C, "Buffer sizes for type %s, result %" PetscInt_FMT " x %" PetscInt_FMT " (k %" PetscInt_FMT ", nzA %" PetscInt_FMT ", nzB %" PetscInt_FMT ", nzC %" PetscInt_FMT ") are: %ldKB %ldKB\n", MatProductTypes[ptype], m, n, k, a->nz, b->nz, c->nz, bufferSize4 / 1024, bufferSize5 / 1024)); } #else size_t bufSize2; /* ask bufferSize bytes for external memory */ stat = cusparseSpGEMM_workEstimation(Ccusp->handle, opA, opB, 
Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufSize2, NULL); PetscCallCUSPARSE(stat); PetscCallCUDA(cudaMalloc((void **)&mmdata->mmBuffer2, bufSize2)); /* inspect the matrices A and B to understand the memory requirement for the next step */ stat = cusparseSpGEMM_workEstimation(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &bufSize2, mmdata->mmBuffer2); PetscCallCUSPARSE(stat); /* ask bufferSize again bytes for external memory */ stat = cusparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, NULL); PetscCallCUSPARSE(stat); /* The CUSPARSE documentation is not clear, nor the API We need both buffers to perform the operations properly! mmdata->mmBuffer2 does not appear anywhere in the compute/copy API it only appears for the workEstimation stuff, but it seems it is needed in compute, so probably the address is stored in the descriptor! What a messy API... */ PetscCallCUDA(cudaMalloc((void **)&mmdata->mmBuffer, mmdata->mmBufferSize)); /* compute the intermediate product of A * B */ stat = cusparseSpGEMM_compute(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc, &mmdata->mmBufferSize, mmdata->mmBuffer); PetscCallCUSPARSE(stat); /* get matrix C non-zero entries C_nnz1 */ PetscCallCUSPARSE(cusparseSpMatGetSize(Cmat->matDescr, &C_num_rows1, &C_num_cols1, &C_nnz1)); c->nz = (PetscInt)C_nnz1; PetscCall(PetscInfo(C, "Buffer sizes for type %s, result %" PetscInt_FMT " x %" PetscInt_FMT " (k %" PetscInt_FMT ", nzA %" PetscInt_FMT ", nzB %" PetscInt_FMT ", nzC %" PetscInt_FMT ") are: %ldKB %ldKB\n", MatProductTypes[ptype], m, n, k, a->nz, b->nz, c->nz, bufSize2 / 1024, mmdata->mmBufferSize / 1024)); Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */ Ccsr->values = new THRUSTARRAY(c->nz); PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */ stat = cusparseCsrSetPointers(Cmat->matDescr, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get()); PetscCallCUSPARSE(stat); stat = cusparseSpGEMM_copy(Ccusp->handle, opA, opB, Cmat->alpha_one, Amat->matDescr, BmatSpDescr, Cmat->beta_zero, Cmat->matDescr, cusparse_scalartype, CUSPARSE_SPGEMM_DEFAULT, mmdata->spgemmDesc); PetscCallCUSPARSE(stat); #endif // PETSC_PKG_CUDA_VERSION_GE(11,4,0) #else PetscCallCUSPARSE(cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_HOST)); stat = cusparseXcsrgemmNnz(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->row_offsets->data().get(), &cnz); PetscCallCUSPARSE(stat); c->nz = cnz; Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */ Ccsr->values = new THRUSTARRAY(c->nz); PetscCallCUDA(cudaPeekAtLastError()); /* catch out of memory errors */ 
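  /* Restore the handle's default device pointer mode: cusparseXcsrgemmNnz() above required host pointer
     mode so the nonzero count could be returned through the host variable cnz, while the other cuSPARSE
     calls in this file pass the device-resident scalars alpha_one/beta_zero/beta_one */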
PetscCallCUSPARSE(cusparseSetPointerMode(Ccusp->handle, CUSPARSE_POINTER_MODE_DEVICE)); /* with the old gemm interface (removed from 11.0 on) we cannot compute the symbolic factorization only. I have tried using the gemm2 interface (alpha * A * B + beta * D), which allows to do symbolic by passing NULL for values, but it seems quite buggy when D is NULL, despite the fact that CUSPARSE documentation claims it is supported! */ stat = cusparse_csr_spgemm(Ccusp->handle, opA, opB, Acsr->num_rows, Bcsr->num_cols, Acsr->num_cols, Amat->descr, Acsr->num_entries, Acsr->values->data().get(), Acsr->row_offsets->data().get(), Acsr->column_indices->data().get(), Bmat->descr, Bcsr->num_entries, Bcsr->values->data().get(), Bcsr->row_offsets->data().get(), Bcsr->column_indices->data().get(), Cmat->descr, Ccsr->values->data().get(), Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get()); PetscCallCUSPARSE(stat); #endif PetscCall(PetscLogGpuFlops(mmdata->flops)); PetscCall(PetscLogGpuTimeEnd()); finalizesym: c->singlemalloc = PETSC_FALSE; c->free_a = PETSC_TRUE; c->free_ij = PETSC_TRUE; PetscCall(PetscMalloc1(m + 1, &c->i)); PetscCall(PetscMalloc1(c->nz, &c->j)); if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */ PetscInt *d_i = c->i; THRUSTINTARRAY ii(Ccsr->row_offsets->size()); THRUSTINTARRAY jj(Ccsr->column_indices->size()); ii = *Ccsr->row_offsets; jj = *Ccsr->column_indices; if (ciscompressed) d_i = c->compressedrow.i; PetscCallCUDA(cudaMemcpy(d_i, ii.data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost)); PetscCallCUDA(cudaMemcpy(c->j, jj.data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost)); } else { PetscInt *d_i = c->i; if (ciscompressed) d_i = c->compressedrow.i; PetscCallCUDA(cudaMemcpy(d_i, Ccsr->row_offsets->data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost)); PetscCallCUDA(cudaMemcpy(c->j, Ccsr->column_indices->data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost)); } if (ciscompressed) { /* need to expand host row offsets */ PetscInt r = 0; c->i[0] = 0; for (k = 0; k < c->compressedrow.nrows; k++) { const PetscInt next = c->compressedrow.rindex[k]; const PetscInt old = c->compressedrow.i[k]; for (; r < next; r++) c->i[r + 1] = old; } for (; r < m; r++) c->i[r + 1] = c->compressedrow.i[c->compressedrow.nrows]; } PetscCall(PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size()) * sizeof(PetscInt))); PetscCall(PetscMalloc1(m, &c->ilen)); PetscCall(PetscMalloc1(m, &c->imax)); c->maxnz = c->nz; c->nonzerorowcnt = 0; c->rmax = 0; for (k = 0; k < m; k++) { const PetscInt nn = c->i[k + 1] - c->i[k]; c->ilen[k] = c->imax[k] = nn; c->nonzerorowcnt += (PetscInt) !!nn; c->rmax = PetscMax(c->rmax, nn); } PetscCall(MatMarkDiagonal_SeqAIJ(C)); PetscCall(PetscMalloc1(c->nz, &c->a)); Ccsr->num_entries = c->nz; C->nonzerostate++; PetscCall(PetscLayoutSetUp(C->rmap)); PetscCall(PetscLayoutSetUp(C->cmap)); Ccusp->nonzerostate = C->nonzerostate; C->offloadmask = PETSC_OFFLOAD_UNALLOCATED; C->preallocated = PETSC_TRUE; C->assembled = PETSC_FALSE; C->was_assembled = PETSC_FALSE; if (product->api_user && A->offloadmask == PETSC_OFFLOAD_BOTH && B->offloadmask == PETSC_OFFLOAD_BOTH) { /* flag the matrix C values as computed, so that the numeric phase will only call MatAssembly */ mmdata->reusesym = PETSC_TRUE; C->offloadmask = PETSC_OFFLOAD_GPU; } C->ops->productnumeric = 
MatProductNumeric_SeqAIJCUSPARSE_SeqAIJCUSPARSE; PetscFunctionReturn(PETSC_SUCCESS); } PETSC_INTERN PetscErrorCode MatProductSetFromOptions_SeqAIJ_SeqDense(Mat); /* handles sparse or dense B */ static PetscErrorCode MatProductSetFromOptions_SeqAIJCUSPARSE(Mat mat) { Mat_Product *product = mat->product; PetscBool isdense = PETSC_FALSE, Biscusp = PETSC_FALSE, Ciscusp = PETSC_TRUE; PetscFunctionBegin; MatCheckProduct(mat, 1); PetscCall(PetscObjectBaseTypeCompare((PetscObject)product->B, MATSEQDENSE, &isdense)); if (!product->A->boundtocpu && !product->B->boundtocpu) PetscCall(PetscObjectTypeCompare((PetscObject)product->B, MATSEQAIJCUSPARSE, &Biscusp)); if (product->type == MATPRODUCT_ABC) { Ciscusp = PETSC_FALSE; if (!product->C->boundtocpu) PetscCall(PetscObjectTypeCompare((PetscObject)product->C, MATSEQAIJCUSPARSE, &Ciscusp)); } if (Biscusp && Ciscusp) { /* we can always select the CPU backend */ PetscBool usecpu = PETSC_FALSE; switch (product->type) { case MATPRODUCT_AB: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatMatMult", "Mat"); PetscCall(PetscOptionsBool("-matmatmult_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AB", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; case MATPRODUCT_AtB: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatTransposeMatMult", "Mat"); PetscCall(PetscOptionsBool("-mattransposematmult_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_AtB", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatTransposeMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; case MATPRODUCT_PtAP: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatPtAP", "Mat"); PetscCall(PetscOptionsBool("-matptap_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_PtAP", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatPtAP", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; case MATPRODUCT_RARt: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatRARt", "Mat"); PetscCall(PetscOptionsBool("-matrart_backend_cpu", "Use CPU code", "MatRARt", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_RARt", "Mat"); PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatRARt", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; case MATPRODUCT_ABC: if (product->api_user) { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatMatMatMult", "Mat"); PetscCall(PetscOptionsBool("-matmatmatmult_backend_cpu", "Use CPU code", "MatMatMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } else { PetscOptionsBegin(PetscObjectComm((PetscObject)mat), ((PetscObject)mat)->prefix, "MatProduct_ABC", "Mat"); 
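  /* Non api_user path: same CPU-fallback switch as the MatMatMatMult branch above, but registered under
     the generic MatProduct prefix (-mat_product_algorithm_backend_cpu) instead of -matmatmatmult_backend_cpu */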
PetscCall(PetscOptionsBool("-mat_product_algorithm_backend_cpu", "Use CPU code", "MatMatMatMult", usecpu, &usecpu, NULL)); PetscOptionsEnd(); } break; default: break; } if (usecpu) Biscusp = Ciscusp = PETSC_FALSE; } /* dispatch */ if (isdense) { switch (product->type) { case MATPRODUCT_AB: case MATPRODUCT_AtB: case MATPRODUCT_ABt: case MATPRODUCT_PtAP: case MATPRODUCT_RARt: if (product->A->boundtocpu) { PetscCall(MatProductSetFromOptions_SeqAIJ_SeqDense(mat)); } else { mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqDENSECUDA; } break; case MATPRODUCT_ABC: mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic; break; default: break; } } else if (Biscusp && Ciscusp) { switch (product->type) { case MATPRODUCT_AB: case MATPRODUCT_AtB: case MATPRODUCT_ABt: mat->ops->productsymbolic = MatProductSymbolic_SeqAIJCUSPARSE_SeqAIJCUSPARSE; break; case MATPRODUCT_PtAP: case MATPRODUCT_RARt: case MATPRODUCT_ABC: mat->ops->productsymbolic = MatProductSymbolic_ABC_Basic; break; default: break; } } else { /* fallback for AIJ */ PetscCall(MatProductSetFromOptions_SeqAIJ(mat)); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatMult_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy) { PetscFunctionBegin; PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_FALSE, PETSC_FALSE)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatMultAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz) { PetscFunctionBegin; PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_FALSE, PETSC_FALSE)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatMultHermitianTranspose_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy) { PetscFunctionBegin; PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_TRUE, PETSC_TRUE)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatMultHermitianTransposeAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz) { PetscFunctionBegin; PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_TRUE, PETSC_TRUE)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatMultTranspose_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy) { PetscFunctionBegin; PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, NULL, yy, PETSC_TRUE, PETSC_FALSE)); PetscFunctionReturn(PETSC_SUCCESS); } __global__ static void ScatterAdd(PetscInt n, PetscInt *idx, const PetscScalar *x, PetscScalar *y) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) y[idx[i]] += x[i]; } /* z = op(A) x + y. 
If trans & !herm, op = ^T; if trans & herm, op = ^H; if !trans, op = no-op */ static PetscErrorCode MatMultAddKernel_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz, PetscBool trans, PetscBool herm) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; Mat_SeqAIJCUSPARSE *cusparsestruct = (Mat_SeqAIJCUSPARSE *)A->spptr; Mat_SeqAIJCUSPARSEMultStruct *matstruct; PetscScalar *xarray, *zarray, *dptr, *beta, *xptr; cusparseOperation_t opA = CUSPARSE_OPERATION_NON_TRANSPOSE; PetscBool compressed; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscInt nx, ny; #endif PetscFunctionBegin; PetscCheck(!herm || trans, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "Hermitian and not transpose not supported"); if (!a->nz) { if (yy) PetscCall(VecSeq_CUDA::Copy(yy, zz)); else PetscCall(VecSeq_CUDA::Set(zz, 0)); PetscFunctionReturn(PETSC_SUCCESS); } /* The line below is necessary due to the operations that modify the matrix on the CPU (axpy, scale, etc) */ PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); if (!trans) { matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat; PetscCheck(matstruct, PetscObjectComm((PetscObject)A), PETSC_ERR_GPU, "SeqAIJCUSPARSE does not have a 'mat' (need to fix)"); } else { if (herm || !A->form_explicit_transpose) { opA = herm ? CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE : CUSPARSE_OPERATION_TRANSPOSE; matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->mat; } else { if (!cusparsestruct->matTranspose) PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A)); matstruct = (Mat_SeqAIJCUSPARSEMultStruct *)cusparsestruct->matTranspose; } } /* Does the matrix use compressed rows (i.e., drop zero rows)? */ compressed = matstruct->cprowIndices ? PETSC_TRUE : PETSC_FALSE; try { PetscCall(VecCUDAGetArrayRead(xx, (const PetscScalar **)&xarray)); if (yy == zz) PetscCall(VecCUDAGetArray(zz, &zarray)); /* read & write zz, so need to get up-to-date zarray on GPU */ else PetscCall(VecCUDAGetArrayWrite(zz, &zarray)); /* write zz, so no need to init zarray on GPU */ PetscCall(PetscLogGpuTimeBegin()); if (opA == CUSPARSE_OPERATION_NON_TRANSPOSE) { /* z = A x + beta y. If A is compressed (with less rows), then Ax is shorter than the full z, so we need a work vector to store Ax. When A is non-compressed, and z = y, we can set beta=1 to compute y = Ax + y in one call. */ xptr = xarray; dptr = compressed ? cusparsestruct->workVector->data().get() : zarray; beta = (yy == zz && !compressed) ? matstruct->beta_one : matstruct->beta_zero; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) /* Get length of x, y for y=Ax. ny might be shorter than the work vector's allocated length, since the work vector is allocated to accommodate different uses. So we get the length info directly from mat. */ if (cusparsestruct->format == MAT_CUSPARSE_CSR) { CsrMatrix *mat = (CsrMatrix *)matstruct->mat; nx = mat->num_cols; ny = mat->num_rows; } #endif } else { /* z = A^T x + beta y If A is compressed, then we need a work vector as the shorter version of x to compute A^T x. Note A^Tx is of full length, so we set beta to 1.0 if y exists. */ xptr = compressed ? cusparsestruct->workVector->data().get() : xarray; dptr = zarray; beta = yy ? 
matstruct->beta_one : matstruct->beta_zero; if (compressed) { /* Scatter x to work vector */ thrust::device_ptr<PetscScalar> xarr = thrust::device_pointer_cast(xarray); thrust::for_each( #if PetscDefined(HAVE_THRUST_ASYNC) thrust::cuda::par.on(PetscDefaultCudaStream), #endif thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(xarr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(), VecCUDAEqualsReverse()); } #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) if (cusparsestruct->format == MAT_CUSPARSE_CSR) { CsrMatrix *mat = (CsrMatrix *)matstruct->mat; nx = mat->num_rows; ny = mat->num_cols; } #endif } /* csr_spmv does y = alpha op(A) x + beta y */ if (cusparsestruct->format == MAT_CUSPARSE_CSR) { #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCheck(opA >= 0 && opA <= 2, PETSC_COMM_SELF, PETSC_ERR_SUP, "cuSPARSE ABI on cusparseOperation_t has changed and PETSc has not been updated accordingly"); if (!matstruct->cuSpMV[opA].initialized) { /* built on demand */ PetscCallCUSPARSE(cusparseCreateDnVec(&matstruct->cuSpMV[opA].vecXDescr, nx, xptr, cusparse_scalartype)); PetscCallCUSPARSE(cusparseCreateDnVec(&matstruct->cuSpMV[opA].vecYDescr, ny, dptr, cusparse_scalartype)); PetscCallCUSPARSE( cusparseSpMV_bufferSize(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, &matstruct->cuSpMV[opA].spmvBufferSize)); PetscCallCUDA(cudaMalloc(&matstruct->cuSpMV[opA].spmvBuffer, matstruct->cuSpMV[opA].spmvBufferSize)); matstruct->cuSpMV[opA].initialized = PETSC_TRUE; } else { /* x, y's value pointers might change between calls, but their shape is kept, so we just update pointers */ PetscCallCUSPARSE(cusparseDnVecSetValues(matstruct->cuSpMV[opA].vecXDescr, xptr)); PetscCallCUSPARSE(cusparseDnVecSetValues(matstruct->cuSpMV[opA].vecYDescr, dptr)); } PetscCallCUSPARSE(cusparseSpMV(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->matDescr, /* built in MatSeqAIJCUSPARSECopyToGPU() or MatSeqAIJCUSPARSEFormExplicitTranspose() */ matstruct->cuSpMV[opA].vecXDescr, beta, matstruct->cuSpMV[opA].vecYDescr, cusparse_scalartype, cusparsestruct->spmvAlg, matstruct->cuSpMV[opA].spmvBuffer)); #else CsrMatrix *mat = (CsrMatrix *)matstruct->mat; PetscCallCUSPARSE(cusparse_csr_spmv(cusparsestruct->handle, opA, mat->num_rows, mat->num_cols, mat->num_entries, matstruct->alpha_one, matstruct->descr, mat->values->data().get(), mat->row_offsets->data().get(), mat->column_indices->data().get(), xptr, beta, dptr)); #endif } else { if (cusparsestruct->nrows) { #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else cusparseHybMat_t hybMat = (cusparseHybMat_t)matstruct->mat; PetscCallCUSPARSE(cusparse_hyb_spmv(cusparsestruct->handle, opA, matstruct->alpha_one, matstruct->descr, hybMat, xptr, beta, dptr)); #endif } } PetscCall(PetscLogGpuTimeEnd()); if (opA == CUSPARSE_OPERATION_NON_TRANSPOSE) { if (yy) { /* MatMultAdd: zz = A*xx + yy */ if (compressed) { /* A is compressed. We first copy yy to zz, then ScatterAdd the work vector to zz */ PetscCall(VecSeq_CUDA::Copy(yy, zz)); /* zz = yy */ } else if (zz != yy) { /* A is not compressed. 
zz already contains A*xx, and we just need to add yy */ PetscCall(VecSeq_CUDA::AXPY(zz, 1.0, yy)); /* zz += yy */ } } else if (compressed) { /* MatMult: zz = A*xx. A is compressed, so we zero zz first, then ScatterAdd the work vector to zz */ PetscCall(VecSeq_CUDA::Set(zz, 0)); } /* ScatterAdd the result from work vector into the full vector when A is compressed */ if (compressed) { PetscCall(PetscLogGpuTimeBegin()); /* I wanted to make this for_each asynchronous but failed. thrust::async::for_each() returns an event (internally registered) and in the destructor of the scope, it will call cudaStreamSynchronize() on this stream. One has to store all events to prevent that. So I just add a ScatterAdd kernel. */ #if 0 thrust::device_ptr<PetscScalar> zptr = thrust::device_pointer_cast(zarray); thrust::async::for_each(thrust::cuda::par.on(cusparsestruct->stream), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))), thrust::make_zip_iterator(thrust::make_tuple(cusparsestruct->workVector->begin(), thrust::make_permutation_iterator(zptr, matstruct->cprowIndices->begin()))) + matstruct->cprowIndices->size(), VecCUDAPlusEquals()); #else PetscInt n = matstruct->cprowIndices->size(); ScatterAdd<<<(n + 255) / 256, 256, 0, PetscDefaultCudaStream>>>(n, matstruct->cprowIndices->data().get(), cusparsestruct->workVector->data().get(), zarray); #endif PetscCall(PetscLogGpuTimeEnd()); } } else { if (yy && yy != zz) PetscCall(VecSeq_CUDA::AXPY(zz, 1.0, yy)); /* zz += yy */ } PetscCall(VecCUDARestoreArrayRead(xx, (const PetscScalar **)&xarray)); if (yy == zz) PetscCall(VecCUDARestoreArray(zz, &zarray)); else PetscCall(VecCUDARestoreArrayWrite(zz, &zarray)); } catch (char *ex) { SETERRQ(PETSC_COMM_SELF, PETSC_ERR_LIB, "CUSPARSE error: %s", ex); } if (yy) { PetscCall(PetscLogGpuFlops(2.0 * a->nz)); } else { PetscCall(PetscLogGpuFlops(2.0 * a->nz - a->nonzerorowcnt)); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatMultTransposeAdd_SeqAIJCUSPARSE(Mat A, Vec xx, Vec yy, Vec zz) { PetscFunctionBegin; PetscCall(MatMultAddKernel_SeqAIJCUSPARSE(A, xx, yy, zz, PETSC_TRUE, PETSC_FALSE)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatAssemblyEnd_SeqAIJCUSPARSE(Mat A, MatAssemblyType mode) { PetscObjectState onnz = A->nonzerostate; Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscFunctionBegin; PetscCall(MatAssemblyEnd_SeqAIJ(A, mode)); if (onnz != A->nonzerostate && cusp->deviceMat) { PetscCall(PetscInfo(A, "Destroy device mat since nonzerostate changed\n")); PetscCallCUDA(cudaFree(cusp->deviceMat)); cusp->deviceMat = NULL; } PetscFunctionReturn(PETSC_SUCCESS); } /*@ MatCreateSeqAIJCUSPARSE - Creates a sparse matrix in `MATAIJCUSPARSE` (compressed row) format (the default parallel PETSc format). This matrix will ultimately be pushed down to NVIDIA GPUs and use the CuSPARSE library for calculations. For good matrix assembly performance the user should preallocate the matrix storage by setting the parameter nz (or the array nnz). By setting these parameters accurately, performance during matrix assembly can be increased by more than a factor of 50. Collective Input Parameters: + comm - MPI communicator, set to `PETSC_COMM_SELF` . m - number of rows . n - number of columns . nz - number of nonzeros per row (same for all rows) - nnz - array containing the number of nonzeros in the various rows (possibly different for each row) or `NULL` Output Parameter: .
A - the matrix Level: intermediate Notes: It is recommended that one use the `MatCreate()`, `MatSetType()` and/or `MatSetFromOptions()`, MatXXXXSetPreallocation() paradigm instead of this routine directly. [MatXXXXSetPreallocation() is, for example, `MatSeqAIJSetPreallocation()`] If `nnz` is given then `nz` is ignored. The AIJ format, also called compressed row storage, is fully compatible with standard Fortran storage. That is, the stored row and column indices can begin at either one (as in Fortran) or zero. See the users' manual for details. Specify the preallocated storage with either nz or nnz (not both). Set `nz` = `PETSC_DEFAULT` and `nnz` = `NULL` for PETSc to control dynamic memory allocation. For large problems you MUST preallocate memory or you will get TERRIBLE performance, see the users' manual chapter on matrices. By default, this format uses inodes (identical nodes) when possible, to improve numerical efficiency of matrix-vector products and solves. We search for consecutive rows with the same nonzero structure, thereby reusing matrix information to achieve increased efficiency. .seealso: [](chapter_matrices), `Mat`, `MATSEQAIJCUSPARSE`, `MatCreate()`, `MatCreateAIJ()`, `MatSetValues()`, `MatSeqAIJSetColumnIndices()`, `MatCreateSeqAIJWithArrays()`, `MatCreateAIJ()`, `MATSEQAIJCUSPARSE`, `MATAIJCUSPARSE` @*/ PetscErrorCode MatCreateSeqAIJCUSPARSE(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt nz, const PetscInt nnz[], Mat *A) { PetscFunctionBegin; PetscCall(MatCreate(comm, A)); PetscCall(MatSetSizes(*A, m, n, m, n)); PetscCall(MatSetType(*A, MATSEQAIJCUSPARSE)); PetscCall(MatSeqAIJSetPreallocation_SeqAIJ(*A, nz, (PetscInt *)nnz)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatDestroy_SeqAIJCUSPARSE(Mat A) { PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) { PetscCall(MatSeqAIJCUSPARSE_Destroy((Mat_SeqAIJCUSPARSE **)&A->spptr)); } else { PetscCall(MatSeqAIJCUSPARSETriFactors_Destroy((Mat_SeqAIJCUSPARSETriFactors **)&A->spptr)); } PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatCUSPARSESetFormat_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatCUSPARSESetUseCPUSolve_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatFactorGetSolverType_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatConvert_seqaijcusparse_hypre_C", NULL)); PetscCall(MatDestroy_SeqAIJ(A)); PetscFunctionReturn(PETSC_SUCCESS); } PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat, MatType, MatReuse, Mat *); static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat, PetscBool); static PetscErrorCode MatDuplicate_SeqAIJCUSPARSE(Mat A, MatDuplicateOption cpvalues, Mat *B) { PetscFunctionBegin; PetscCall(MatDuplicate_SeqAIJ(A, cpvalues, B)); PetscCall(MatConvert_SeqAIJ_SeqAIJCUSPARSE(*B, MATSEQAIJCUSPARSE, MAT_INPLACE_MATRIX, B)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatAXPY_SeqAIJCUSPARSE(Mat
Y, PetscScalar a, Mat X, MatStructure str) { Mat_SeqAIJ *x = (Mat_SeqAIJ *)X->data, *y = (Mat_SeqAIJ *)Y->data; Mat_SeqAIJCUSPARSE *cy; Mat_SeqAIJCUSPARSE *cx; PetscScalar *ay; const PetscScalar *ax; CsrMatrix *csry, *csrx; PetscFunctionBegin; cy = (Mat_SeqAIJCUSPARSE *)Y->spptr; cx = (Mat_SeqAIJCUSPARSE *)X->spptr; if (X->ops->axpy != Y->ops->axpy) { PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(Y, PETSC_FALSE)); PetscCall(MatAXPY_SeqAIJ(Y, a, X, str)); PetscFunctionReturn(PETSC_SUCCESS); } /* if we are here, it means both matrices are bound to GPU */ PetscCall(MatSeqAIJCUSPARSECopyToGPU(Y)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(X)); PetscCheck(cy->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)Y), PETSC_ERR_GPU, "only MAT_CUSPARSE_CSR supported"); PetscCheck(cx->format == MAT_CUSPARSE_CSR, PetscObjectComm((PetscObject)X), PETSC_ERR_GPU, "only MAT_CUSPARSE_CSR supported"); csry = (CsrMatrix *)cy->mat->mat; csrx = (CsrMatrix *)cx->mat->mat; /* see if we can turn this into a cublas axpy */ if (str != SAME_NONZERO_PATTERN && x->nz == y->nz && !x->compressedrow.use && !y->compressedrow.use) { bool eq = thrust::equal(thrust::device, csry->row_offsets->begin(), csry->row_offsets->end(), csrx->row_offsets->begin()); if (eq) eq = thrust::equal(thrust::device, csry->column_indices->begin(), csry->column_indices->end(), csrx->column_indices->begin()); if (eq) str = SAME_NONZERO_PATTERN; } /* spgeam is buggy with one column */ if (Y->cmap->n == 1 && str != SAME_NONZERO_PATTERN) str = DIFFERENT_NONZERO_PATTERN; if (str == SUBSET_NONZERO_PATTERN) { PetscScalar b = 1.0; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) size_t bufferSize; void *buffer; #endif PetscCall(MatSeqAIJCUSPARSEGetArrayRead(X, &ax)); PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay)); PetscCallCUSPARSE(cusparseSetPointerMode(cy->handle, CUSPARSE_POINTER_MODE_HOST)); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) PetscCallCUSPARSE(cusparse_csr_spgeam_bufferSize(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), &bufferSize)); PetscCallCUDA(cudaMalloc(&buffer, bufferSize)); PetscCall(PetscLogGpuTimeBegin()); PetscCallCUSPARSE(cusparse_csr_spgeam(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), buffer)); PetscCall(PetscLogGpuFlops(x->nz + y->nz)); PetscCall(PetscLogGpuTimeEnd()); PetscCallCUDA(cudaFree(buffer)); #else PetscCall(PetscLogGpuTimeBegin()); PetscCallCUSPARSE(cusparse_csr_spgeam(cy->handle, Y->rmap->n, Y->cmap->n, &a, cx->mat->descr, x->nz, ax, csrx->row_offsets->data().get(), csrx->column_indices->data().get(), &b, cy->mat->descr, y->nz, ay, csry->row_offsets->data().get(), csry->column_indices->data().get(), cy->mat->descr, ay, csry->row_offsets->data().get(), csry->column_indices->data().get())); PetscCall(PetscLogGpuFlops(x->nz + y->nz)); PetscCall(PetscLogGpuTimeEnd()); #endif PetscCallCUSPARSE(cusparseSetPointerMode(cy->handle, CUSPARSE_POINTER_MODE_DEVICE)); PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(X, &ax)); PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay)); 
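  /* The csrgeam call above updated Y's values in place on the GPU, so Y's cached diagonal information
     is stale and is invalidated below */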
PetscCall(MatSeqAIJInvalidateDiagonal(Y)); } else if (str == SAME_NONZERO_PATTERN) { cublasHandle_t cublasv2handle; PetscBLASInt one = 1, bnz = 1; PetscCall(MatSeqAIJCUSPARSEGetArrayRead(X, &ax)); PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay)); PetscCall(PetscCUBLASGetHandle(&cublasv2handle)); PetscCall(PetscBLASIntCast(x->nz, &bnz)); PetscCall(PetscLogGpuTimeBegin()); PetscCallCUBLAS(cublasXaxpy(cublasv2handle, bnz, &a, ax, one, ay, one)); PetscCall(PetscLogGpuFlops(2.0 * bnz)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(X, &ax)); PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay)); PetscCall(MatSeqAIJInvalidateDiagonal(Y)); } else { PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(Y, PETSC_FALSE)); PetscCall(MatAXPY_SeqAIJ(Y, a, X, str)); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatScale_SeqAIJCUSPARSE(Mat Y, PetscScalar a) { Mat_SeqAIJ *y = (Mat_SeqAIJ *)Y->data; PetscScalar *ay; cublasHandle_t cublasv2handle; PetscBLASInt one = 1, bnz = 1; PetscFunctionBegin; PetscCall(MatSeqAIJCUSPARSEGetArray(Y, &ay)); PetscCall(PetscCUBLASGetHandle(&cublasv2handle)); PetscCall(PetscBLASIntCast(y->nz, &bnz)); PetscCall(PetscLogGpuTimeBegin()); PetscCallCUBLAS(cublasXscal(cublasv2handle, bnz, &a, ay, one)); PetscCall(PetscLogGpuFlops(bnz)); PetscCall(PetscLogGpuTimeEnd()); PetscCall(MatSeqAIJCUSPARSERestoreArray(Y, &ay)); PetscCall(MatSeqAIJInvalidateDiagonal(Y)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatZeroEntries_SeqAIJCUSPARSE(Mat A) { PetscBool both = PETSC_FALSE; Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscFunctionBegin; if (A->factortype == MAT_FACTOR_NONE) { Mat_SeqAIJCUSPARSE *spptr = (Mat_SeqAIJCUSPARSE *)A->spptr; if (spptr->mat) { CsrMatrix *matrix = (CsrMatrix *)spptr->mat->mat; if (matrix->values) { both = PETSC_TRUE; thrust::fill(thrust::device, matrix->values->begin(), matrix->values->end(), 0.); } } if (spptr->matTranspose) { CsrMatrix *matrix = (CsrMatrix *)spptr->matTranspose->mat; if (matrix->values) thrust::fill(thrust::device, matrix->values->begin(), matrix->values->end(), 0.); } } PetscCall(PetscArrayzero(a->a, a->i[A->rmap->n])); PetscCall(MatSeqAIJInvalidateDiagonal(A)); if (both) A->offloadmask = PETSC_OFFLOAD_BOTH; else A->offloadmask = PETSC_OFFLOAD_CPU; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatBindToCPU_SeqAIJCUSPARSE(Mat A, PetscBool flg) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscFunctionBegin; if (A->factortype != MAT_FACTOR_NONE) { A->boundtocpu = flg; PetscFunctionReturn(PETSC_SUCCESS); } if (flg) { PetscCall(MatSeqAIJCUSPARSECopyFromGPU(A)); A->ops->scale = MatScale_SeqAIJ; A->ops->axpy = MatAXPY_SeqAIJ; A->ops->zeroentries = MatZeroEntries_SeqAIJ; A->ops->mult = MatMult_SeqAIJ; A->ops->multadd = MatMultAdd_SeqAIJ; A->ops->multtranspose = MatMultTranspose_SeqAIJ; A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJ; A->ops->multhermitiantranspose = NULL; A->ops->multhermitiantransposeadd = NULL; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJ; PetscCall(PetscMemzero(a->ops, sizeof(Mat_SeqAIJOps))); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, 
"MatSetValuesCOO_C", NULL)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", NULL)); } else { A->ops->scale = MatScale_SeqAIJCUSPARSE; A->ops->axpy = MatAXPY_SeqAIJCUSPARSE; A->ops->zeroentries = MatZeroEntries_SeqAIJCUSPARSE; A->ops->mult = MatMult_SeqAIJCUSPARSE; A->ops->multadd = MatMultAdd_SeqAIJCUSPARSE; A->ops->multtranspose = MatMultTranspose_SeqAIJCUSPARSE; A->ops->multtransposeadd = MatMultTransposeAdd_SeqAIJCUSPARSE; A->ops->multhermitiantranspose = MatMultHermitianTranspose_SeqAIJCUSPARSE; A->ops->multhermitiantransposeadd = MatMultHermitianTransposeAdd_SeqAIJCUSPARSE; A->ops->productsetfromoptions = MatProductSetFromOptions_SeqAIJCUSPARSE; a->ops->getarray = MatSeqAIJGetArray_SeqAIJCUSPARSE; a->ops->restorearray = MatSeqAIJRestoreArray_SeqAIJCUSPARSE; a->ops->getarrayread = MatSeqAIJGetArrayRead_SeqAIJCUSPARSE; a->ops->restorearrayread = MatSeqAIJRestoreArrayRead_SeqAIJCUSPARSE; a->ops->getarraywrite = MatSeqAIJGetArrayWrite_SeqAIJCUSPARSE; a->ops->restorearraywrite = MatSeqAIJRestoreArrayWrite_SeqAIJCUSPARSE; a->ops->getcsrandmemtype = MatSeqAIJGetCSRAndMemType_SeqAIJCUSPARSE; PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSeqAIJCopySubArray_C", MatSeqAIJCopySubArray_SeqAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdensecuda_C", MatProductSetFromOptions_SeqAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqdense_C", MatProductSetFromOptions_SeqAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetPreallocationCOO_C", MatSetPreallocationCOO_SeqAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatSetValuesCOO_C", MatSetValuesCOO_SeqAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)A, "MatProductSetFromOptions_seqaijcusparse_seqaijcusparse_C", MatProductSetFromOptions_SeqAIJCUSPARSE)); } A->boundtocpu = flg; if (flg && a->inode.size) { a->inode.use = PETSC_TRUE; } else { a->inode.use = PETSC_FALSE; } PetscFunctionReturn(PETSC_SUCCESS); } PETSC_INTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCUSPARSE(Mat A, MatType, MatReuse reuse, Mat *newmat) { Mat B; PetscFunctionBegin; PetscCall(PetscDeviceInitialize(PETSC_DEVICE_CUDA)); /* first use of CUSPARSE may be via MatConvert */ if (reuse == MAT_INITIAL_MATRIX) { PetscCall(MatDuplicate(A, MAT_COPY_VALUES, newmat)); } else if (reuse == MAT_REUSE_MATRIX) { PetscCall(MatCopy(A, *newmat, SAME_NONZERO_PATTERN)); } B = *newmat; PetscCall(PetscFree(B->defaultvectype)); PetscCall(PetscStrallocpy(VECCUDA, &B->defaultvectype)); if (reuse != MAT_REUSE_MATRIX && !B->spptr) { if (B->factortype == MAT_FACTOR_NONE) { Mat_SeqAIJCUSPARSE *spptr; PetscCall(PetscNew(&spptr)); PetscCallCUSPARSE(cusparseCreate(&spptr->handle)); PetscCallCUSPARSE(cusparseSetStream(spptr->handle, PetscDefaultCudaStream)); spptr->format = MAT_CUSPARSE_CSR; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) #if CUSPARSE_VERSION > 11301 spptr->spmvAlg = CUSPARSE_SPMV_CSR_ALG1; /* default, since we only support csr */ #else spptr->spmvAlg = CUSPARSE_CSRMV_ALG1; /* default, since we only support csr */ #endif spptr->spmmAlg = CUSPARSE_SPMM_CSR_ALG1; /* default, only support column-major dense matrix B */ spptr->csr2cscAlg = CUSPARSE_CSR2CSC_ALG1; #endif B->spptr = spptr; } else { Mat_SeqAIJCUSPARSETriFactors *spptr; PetscCall(PetscNew(&spptr)); PetscCallCUSPARSE(cusparseCreate(&spptr->handle)); 
PetscCallCUSPARSE(cusparseSetStream(spptr->handle, PetscDefaultCudaStream)); B->spptr = spptr; } B->offloadmask = PETSC_OFFLOAD_UNALLOCATED; } B->ops->assemblyend = MatAssemblyEnd_SeqAIJCUSPARSE; B->ops->destroy = MatDestroy_SeqAIJCUSPARSE; B->ops->setoption = MatSetOption_SeqAIJCUSPARSE; B->ops->setfromoptions = MatSetFromOptions_SeqAIJCUSPARSE; B->ops->bindtocpu = MatBindToCPU_SeqAIJCUSPARSE; B->ops->duplicate = MatDuplicate_SeqAIJCUSPARSE; PetscCall(MatBindToCPU_SeqAIJCUSPARSE(B, PETSC_FALSE)); PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATSEQAIJCUSPARSE)); PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetFormat_C", MatCUSPARSESetFormat_SeqAIJCUSPARSE)); #if defined(PETSC_HAVE_HYPRE) PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_seqaijcusparse_hypre_C", MatConvert_AIJ_HYPRE)); #endif PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatCUSPARSESetUseCPUSolve_C", MatCUSPARSESetUseCPUSolve_SeqAIJCUSPARSE)); PetscFunctionReturn(PETSC_SUCCESS); } PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCUSPARSE(Mat B) { PetscFunctionBegin; PetscCall(MatCreate_SeqAIJ(B)); PetscCall(MatConvert_SeqAIJ_SeqAIJCUSPARSE(B, MATSEQAIJCUSPARSE, MAT_INPLACE_MATRIX, &B)); PetscFunctionReturn(PETSC_SUCCESS); } /*MC MATSEQAIJCUSPARSE - MATAIJCUSPARSE = "(seq)aijcusparse" - A matrix type to be used for sparse matrices. A matrix type whose data resides on NVIDIA GPUs. These matrices can be in either CSR, ELL, or Hybrid format. All matrix calculations are performed on NVIDIA GPUs using the CuSPARSE library. Options Database Keys: + -mat_type aijcusparse - sets the matrix type to "seqaijcusparse" during a call to `MatSetFromOptions()` . -mat_cusparse_storage_format csr - sets the storage format of matrices (for `MatMult()` and factors in `MatSolve()`). Other options include ell (ellpack) or hyb (hybrid). . -mat_cusparse_mult_storage_format csr - sets the storage format of matrices (for `MatMult()`). Other options include ell (ellpack) or hyb (hybrid). 
- -mat_cusparse_use_cpu_solve - Do `MatSolve()` on CPU Level: beginner .seealso: [](chapter_matrices), `Mat`, `MatCreateSeqAIJCUSPARSE()`, `MatCUSPARSESetUseCPUSolve()`, `MATAIJCUSPARSE`, `MatCreateAIJCUSPARSE()`, `MatCUSPARSESetFormat()`, `MatCUSPARSEStorageFormat`, `MatCUSPARSEFormatOperation` M*/ PETSC_EXTERN PetscErrorCode MatGetFactor_seqaijcusparse_cusparse_band(Mat, MatFactorType, Mat *); PETSC_EXTERN PetscErrorCode MatSolverTypeRegister_CUSPARSE(void) { PetscFunctionBegin; PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSEBAND, MATSEQAIJ, MAT_FACTOR_LU, MatGetFactor_seqaijcusparse_cusparse_band)); PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_LU, MatGetFactor_seqaijcusparse_cusparse)); PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_CHOLESKY, MatGetFactor_seqaijcusparse_cusparse)); PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_ILU, MatGetFactor_seqaijcusparse_cusparse)); PetscCall(MatSolverTypeRegister(MATSOLVERCUSPARSE, MATSEQAIJCUSPARSE, MAT_FACTOR_ICC, MatGetFactor_seqaijcusparse_cusparse)); PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatResetPreallocationCOO_SeqAIJCUSPARSE(Mat mat) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)mat->spptr; PetscFunctionBegin; if (!cusp) PetscFunctionReturn(PETSC_SUCCESS); delete cusp->cooPerm; delete cusp->cooPerm_a; cusp->cooPerm = NULL; cusp->cooPerm_a = NULL; if (cusp->use_extended_coo) { PetscCallCUDA(cudaFree(cusp->jmap_d)); PetscCallCUDA(cudaFree(cusp->perm_d)); } cusp->use_extended_coo = PETSC_FALSE; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSE_Destroy(Mat_SeqAIJCUSPARSE **cusparsestruct) { PetscFunctionBegin; if (*cusparsestruct) { PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->mat, (*cusparsestruct)->format)); PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&(*cusparsestruct)->matTranspose, (*cusparsestruct)->format)); delete (*cusparsestruct)->workVector; delete (*cusparsestruct)->rowoffsets_gpu; delete (*cusparsestruct)->cooPerm; delete (*cusparsestruct)->cooPerm_a; delete (*cusparsestruct)->csr2csc_i; if ((*cusparsestruct)->handle) PetscCallCUSPARSE(cusparseDestroy((*cusparsestruct)->handle)); if ((*cusparsestruct)->jmap_d) PetscCallCUDA(cudaFree((*cusparsestruct)->jmap_d)); if ((*cusparsestruct)->perm_d) PetscCallCUDA(cudaFree((*cusparsestruct)->perm_d)); PetscCall(PetscFree(*cusparsestruct)); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode CsrMatrix_Destroy(CsrMatrix **mat) { PetscFunctionBegin; if (*mat) { delete (*mat)->values; delete (*mat)->column_indices; delete (*mat)->row_offsets; delete *mat; *mat = 0; } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSETriFactorStruct **trifactor) { PetscFunctionBegin; if (*trifactor) { if ((*trifactor)->descr) PetscCallCUSPARSE(cusparseDestroyMatDescr((*trifactor)->descr)); if ((*trifactor)->solveInfo) PetscCallCUSPARSE(cusparseDestroyCsrsvInfo((*trifactor)->solveInfo)); PetscCall(CsrMatrix_Destroy(&(*trifactor)->csrMat)); if ((*trifactor)->solveBuffer) PetscCallCUDA(cudaFree((*trifactor)->solveBuffer)); if ((*trifactor)->AA_h) PetscCallCUDA(cudaFreeHost((*trifactor)->AA_h)); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) if ((*trifactor)->csr2cscBuffer) PetscCallCUDA(cudaFree((*trifactor)->csr2cscBuffer)); #endif PetscCall(PetscFree(*trifactor)); } PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode 
MatSeqAIJCUSPARSEMultStruct_Destroy(Mat_SeqAIJCUSPARSEMultStruct **matstruct, MatCUSPARSEStorageFormat format) { CsrMatrix *mat; PetscFunctionBegin; if (*matstruct) { if ((*matstruct)->mat) { if (format == MAT_CUSPARSE_ELL || format == MAT_CUSPARSE_HYB) { #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_CUSPARSE_ELL and MAT_CUSPARSE_HYB are not supported since CUDA-11.0"); #else cusparseHybMat_t hybMat = (cusparseHybMat_t)(*matstruct)->mat; PetscCallCUSPARSE(cusparseDestroyHybMat(hybMat)); #endif } else { mat = (CsrMatrix *)(*matstruct)->mat; PetscCall(CsrMatrix_Destroy(&mat)); } } if ((*matstruct)->descr) PetscCallCUSPARSE(cusparseDestroyMatDescr((*matstruct)->descr)); delete (*matstruct)->cprowIndices; if ((*matstruct)->alpha_one) PetscCallCUDA(cudaFree((*matstruct)->alpha_one)); if ((*matstruct)->beta_zero) PetscCallCUDA(cudaFree((*matstruct)->beta_zero)); if ((*matstruct)->beta_one) PetscCallCUDA(cudaFree((*matstruct)->beta_one)); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) Mat_SeqAIJCUSPARSEMultStruct *mdata = *matstruct; if (mdata->matDescr) PetscCallCUSPARSE(cusparseDestroySpMat(mdata->matDescr)); for (int i = 0; i < 3; i++) { if (mdata->cuSpMV[i].initialized) { PetscCallCUDA(cudaFree(mdata->cuSpMV[i].spmvBuffer)); PetscCallCUSPARSE(cusparseDestroyDnVec(mdata->cuSpMV[i].vecXDescr)); PetscCallCUSPARSE(cusparseDestroyDnVec(mdata->cuSpMV[i].vecYDescr)); } } #endif delete *matstruct; *matstruct = NULL; } PetscFunctionReturn(PETSC_SUCCESS); } PetscErrorCode MatSeqAIJCUSPARSETriFactors_Reset(Mat_SeqAIJCUSPARSETriFactors_p *trifactors) { Mat_SeqAIJCUSPARSETriFactors *fs = *trifactors; PetscFunctionBegin; if (fs) { PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->loTriFactorPtr)); PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->upTriFactorPtr)); PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->loTriFactorPtrTranspose)); PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&fs->upTriFactorPtrTranspose)); delete fs->rpermIndices; delete fs->cpermIndices; delete fs->workVector; fs->rpermIndices = NULL; fs->cpermIndices = NULL; fs->workVector = NULL; if (fs->a_band_d) PetscCallCUDA(cudaFree(fs->a_band_d)); if (fs->i_band_d) PetscCallCUDA(cudaFree(fs->i_band_d)); fs->init_dev_prop = PETSC_FALSE; #if CUSPARSE_VERSION >= 11500 PetscCallCUDA(cudaFree(fs->csrRowPtr)); PetscCallCUDA(cudaFree(fs->csrColIdx)); PetscCallCUDA(cudaFree(fs->csrVal)); PetscCallCUDA(cudaFree(fs->X)); PetscCallCUDA(cudaFree(fs->Y)); // PetscCallCUDA(cudaFree(fs->factBuffer_M)); /* No needed since factBuffer_M shares with one of spsvBuffer_L/U */ PetscCallCUDA(cudaFree(fs->spsvBuffer_L)); PetscCallCUDA(cudaFree(fs->spsvBuffer_U)); PetscCallCUDA(cudaFree(fs->spsvBuffer_Lt)); PetscCallCUDA(cudaFree(fs->spsvBuffer_Ut)); PetscCallCUSPARSE(cusparseDestroyMatDescr(fs->matDescr_M)); PetscCallCUSPARSE(cusparseDestroySpMat(fs->spMatDescr_L)); PetscCallCUSPARSE(cusparseDestroySpMat(fs->spMatDescr_U)); PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_L)); PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_Lt)); PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_U)); PetscCallCUSPARSE(cusparseSpSV_destroyDescr(fs->spsvDescr_Ut)); PetscCallCUSPARSE(cusparseDestroyDnVec(fs->dnVecDescr_X)); PetscCallCUSPARSE(cusparseDestroyDnVec(fs->dnVecDescr_Y)); PetscCallCUSPARSE(cusparseDestroyCsrilu02Info(fs->ilu0Info_M)); PetscCallCUSPARSE(cusparseDestroyCsric02Info(fs->ic0Info_M)); fs->createdTransposeSpSVDescr = PETSC_FALSE; fs->updatedTransposeSpSVAnalysis = PETSC_FALSE; #endif } 
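  /* Note: Reset() only releases the data owned by the triangular factors and keeps the cuSPARSE handle,
     so the factors can be rebuilt; the handle itself is destroyed in MatSeqAIJCUSPARSETriFactors_Destroy() below */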
PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCUSPARSETriFactors_Destroy(Mat_SeqAIJCUSPARSETriFactors **trifactors) { PetscFunctionBegin; if (*trifactors) { PetscCall(MatSeqAIJCUSPARSETriFactors_Reset(trifactors)); PetscCallCUSPARSE(cusparseDestroy((*trifactors)->handle)); PetscCall(PetscFree(*trifactors)); } PetscFunctionReturn(PETSC_SUCCESS); } struct IJCompare { __host__ __device__ inline bool operator()(const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2) { if (t1.get<0>() < t2.get<0>()) return true; if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>(); return false; } }; struct IJEqual { __host__ __device__ inline bool operator()(const thrust::tuple<PetscInt, PetscInt> &t1, const thrust::tuple<PetscInt, PetscInt> &t2) { if (t1.get<0>() != t2.get<0>() || t1.get<1>() != t2.get<1>()) return false; return true; } }; struct IJDiff { __host__ __device__ inline PetscInt operator()(const PetscInt &t1, const PetscInt &t2) { return t1 == t2 ? 0 : 1; } }; struct IJSum { __host__ __device__ inline PetscInt operator()(const PetscInt &t1, const PetscInt &t2) { return t1 || t2; } }; #include <thrust/iterator/discard_iterator.h> /* Associated with MatSetPreallocationCOO_SeqAIJCUSPARSE_Basic() */ PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE_Basic(Mat A, const PetscScalar v[], InsertMode imode) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; THRUSTARRAY *cooPerm_v = NULL; thrust::device_ptr<const PetscScalar> d_v; CsrMatrix *matrix; PetscInt n; PetscFunctionBegin; PetscCheck(cusp, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUSPARSE struct"); PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUSPARSE CsrMatrix"); if (!cusp->cooPerm) { PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY)); PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY)); PetscFunctionReturn(PETSC_SUCCESS); } matrix = (CsrMatrix *)cusp->mat->mat; PetscCheck(matrix->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory"); if (!v) { if (imode == INSERT_VALUES) thrust::fill(thrust::device, matrix->values->begin(), matrix->values->end(), 0.); goto finalize; } n = cusp->cooPerm->size(); if (isCudaMem(v)) { d_v = thrust::device_pointer_cast(v); } else { cooPerm_v = new THRUSTARRAY(n); cooPerm_v->assign(v, v + n); d_v = cooPerm_v->data(); PetscCall(PetscLogCpuToGpu(n * sizeof(PetscScalar))); } PetscCall(PetscLogGpuTimeBegin()); if (imode == ADD_VALUES) { /* ADD VALUES means add to existing ones */ if (cusp->cooPerm_a) { /* there are repeated entries in d_v[], and we need to add them together */ THRUSTARRAY *cooPerm_w = new THRUSTARRAY(matrix->values->size()); auto vbit = thrust::make_permutation_iterator(d_v, cusp->cooPerm->begin()); /* thrust::reduce_by_key(keys_first,keys_last,values_first,keys_output,values_output) cooPerm_a = [0,0,1,2,3,4]. The length is n, number of nonzeros in d_v[]. cooPerm_a is ordered. d_v[i] is the cooPerm_a[i]-th unique nonzero. 
*/ thrust::reduce_by_key(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), vbit, thrust::make_discard_iterator(), cooPerm_w->begin(), thrust::equal_to<PetscInt>(), thrust::plus<PetscScalar>()); thrust::transform(cooPerm_w->begin(), cooPerm_w->end(), matrix->values->begin(), matrix->values->begin(), thrust::plus<PetscScalar>()); delete cooPerm_w; } else { /* all nonzeros in d_v[] are unique entries */ auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v, cusp->cooPerm->begin()), matrix->values->begin())); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v, cusp->cooPerm->end()), matrix->values->end())); thrust::for_each(zibit, zieit, VecCUDAPlusEquals()); /* values[i] += d_v[cooPerm[i]] */ } } else { if (cusp->cooPerm_a) { /* repeated entries in COO, with INSERT_VALUES -> reduce */ auto vbit = thrust::make_permutation_iterator(d_v, cusp->cooPerm->begin()); thrust::reduce_by_key(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), vbit, thrust::make_discard_iterator(), matrix->values->begin(), thrust::equal_to<PetscInt>(), thrust::plus<PetscScalar>()); } else { auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v, cusp->cooPerm->begin()), matrix->values->begin())); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(d_v, cusp->cooPerm->end()), matrix->values->end())); thrust::for_each(zibit, zieit, VecCUDAEquals()); } } PetscCall(PetscLogGpuTimeEnd()); finalize: delete cooPerm_v; A->offloadmask = PETSC_OFFLOAD_GPU; PetscCall(PetscObjectStateIncrease((PetscObject)A)); /* shorter version of MatAssemblyEnd_SeqAIJ */ PetscCall(PetscInfo(A, "Matrix size: %" PetscInt_FMT " X %" PetscInt_FMT "; storage space: 0 unneeded,%" PetscInt_FMT " used\n", A->rmap->n, A->cmap->n, a->nz)); PetscCall(PetscInfo(A, "Number of mallocs during MatSetValues() is 0\n")); PetscCall(PetscInfo(A, "Maximum nonzeros in any row is %" PetscInt_FMT "\n", a->rmax)); a->reallocs = 0; A->info.mallocs += 0; A->info.nz_unneeded = 0; A->assembled = A->was_assembled = PETSC_TRUE; A->num_ass++; PetscFunctionReturn(PETSC_SUCCESS); } PetscErrorCode MatSeqAIJCUSPARSEInvalidateTranspose(Mat A, PetscBool destroy) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscFunctionBegin; PetscCheckTypeName(A, MATSEQAIJCUSPARSE); if (!cusp) PetscFunctionReturn(PETSC_SUCCESS); if (destroy) { PetscCall(MatSeqAIJCUSPARSEMultStruct_Destroy(&cusp->matTranspose, cusp->format)); delete cusp->csr2csc_i; cusp->csr2csc_i = NULL; } A->transupdated = PETSC_FALSE; PetscFunctionReturn(PETSC_SUCCESS); } #include <thrust/binary_search.h> /* 'Basic' means it only works when coo_i[] and coo_j[] do not contain negative indices */ PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE_Basic(Mat A, PetscCount n, PetscInt coo_i[], PetscInt coo_j[]) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscInt cooPerm_n, nzr = 0; PetscFunctionBegin; PetscCall(PetscLayoutSetUp(A->rmap)); PetscCall(PetscLayoutSetUp(A->cmap)); cooPerm_n = cusp->cooPerm ? 
cusp->cooPerm->size() : 0; if (n != cooPerm_n) { delete cusp->cooPerm; delete cusp->cooPerm_a; cusp->cooPerm = NULL; cusp->cooPerm_a = NULL; } if (n) { thrust::device_ptr<PetscInt> d_i, d_j; PetscInt *d_raw_i, *d_raw_j; PetscBool free_raw_i = PETSC_FALSE, free_raw_j = PETSC_FALSE; PetscMemType imtype, jmtype; PetscCall(PetscGetMemType(coo_i, &imtype)); if (PetscMemTypeHost(imtype)) { PetscCallCUDA(cudaMalloc(&d_raw_i, sizeof(PetscInt) * n)); PetscCallCUDA(cudaMemcpy(d_raw_i, coo_i, sizeof(PetscInt) * n, cudaMemcpyHostToDevice)); d_i = thrust::device_pointer_cast(d_raw_i); free_raw_i = PETSC_TRUE; PetscCall(PetscLogCpuToGpu(1. * n * sizeof(PetscInt))); } else { d_i = thrust::device_pointer_cast(coo_i); } PetscCall(PetscGetMemType(coo_j, &jmtype)); if (PetscMemTypeHost(jmtype)) { // MatSetPreallocationCOO_MPIAIJCUSPARSE_Basic() passes device coo_i[] and host coo_j[]! PetscCallCUDA(cudaMalloc(&d_raw_j, sizeof(PetscInt) * n)); PetscCallCUDA(cudaMemcpy(d_raw_j, coo_j, sizeof(PetscInt) * n, cudaMemcpyHostToDevice)); d_j = thrust::device_pointer_cast(d_raw_j); free_raw_j = PETSC_TRUE; PetscCall(PetscLogCpuToGpu(1. * n * sizeof(PetscInt))); } else { d_j = thrust::device_pointer_cast(coo_j); } THRUSTINTARRAY ii(A->rmap->n); if (!cusp->cooPerm) cusp->cooPerm = new THRUSTINTARRAY(n); if (!cusp->cooPerm_a) cusp->cooPerm_a = new THRUSTINTARRAY(n); /* Ex. n = 6 coo_i = [3,3,1,4,1,4] coo_j = [3,2,2,5,2,6] */ auto fkey = thrust::make_zip_iterator(thrust::make_tuple(d_i, d_j)); auto ekey = thrust::make_zip_iterator(thrust::make_tuple(d_i + n, d_j + n)); PetscCall(PetscLogGpuTimeBegin()); thrust::sequence(thrust::device, cusp->cooPerm->begin(), cusp->cooPerm->end(), 0); thrust::sort_by_key(fkey, ekey, cusp->cooPerm->begin(), IJCompare()); /* sort by row, then by col */ (*cusp->cooPerm_a).assign(d_i, d_i + n); /* copy the sorted array */ THRUSTINTARRAY w(d_j, d_j + n); /* d_i = [1,1,3,3,4,4] d_j = [2,2,2,3,5,6] cooPerm = [2,4,1,0,3,5] */ auto nekey = thrust::unique(fkey, ekey, IJEqual()); /* unique (d_i, d_j) */ /* d_i = [1,3,3,4,4,x] ^ekey d_j = [2,2,3,5,6,x] ^nekye */ if (nekey == ekey) { /* all entries are unique */ delete cusp->cooPerm_a; cusp->cooPerm_a = NULL; } else { /* Stefano: I couldn't come up with a more elegant algorithm */ /* idea: any change in i or j in the (i,j) sequence implies a new nonzero */ adjacent_difference(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), cusp->cooPerm_a->begin(), IJDiff()); /* cooPerm_a: [1,1,3,3,4,4] => [1,0,1,0,1,0]*/ adjacent_difference(w.begin(), w.end(), w.begin(), IJDiff()); /* w: [2,2,2,3,5,6] => [2,0,0,1,1,1]*/ (*cusp->cooPerm_a)[0] = 0; /* clear the first entry, though accessing an entry on device implies a cudaMemcpy */ w[0] = 0; thrust::transform(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), w.begin(), cusp->cooPerm_a->begin(), IJSum()); /* cooPerm_a = [0,0,1,1,1,1]*/ thrust::inclusive_scan(cusp->cooPerm_a->begin(), cusp->cooPerm_a->end(), cusp->cooPerm_a->begin(), thrust::plus<PetscInt>()); /*cooPerm_a=[0,0,1,2,3,4]*/ } thrust::counting_iterator<PetscInt> search_begin(0); thrust::upper_bound(d_i, nekey.get_iterator_tuple().get<0>(), /* binary search entries of [0,1,2,3,4,5,6) in ordered array d_i = [1,3,3,4,4], supposing A->rmap->n = 6. */ search_begin, search_begin + A->rmap->n, /* return in ii[] the index of last position in d_i[] where value could be inserted without violating the ordering */ ii.begin()); /* ii = [0,1,1,3,5,5]. 
A leading 0 will be added later */ PetscCall(PetscLogGpuTimeEnd()); PetscCall(MatSeqXAIJFreeAIJ(A, &a->a, &a->j, &a->i)); a->singlemalloc = PETSC_FALSE; a->free_a = PETSC_TRUE; a->free_ij = PETSC_TRUE; PetscCall(PetscMalloc1(A->rmap->n + 1, &a->i)); a->i[0] = 0; /* a->i = [0,0,1,1,3,5,5] */ PetscCallCUDA(cudaMemcpy(a->i + 1, ii.data().get(), A->rmap->n * sizeof(PetscInt), cudaMemcpyDeviceToHost)); a->nz = a->maxnz = a->i[A->rmap->n]; a->rmax = 0; PetscCall(PetscMalloc1(a->nz, &a->a)); PetscCall(PetscMalloc1(a->nz, &a->j)); PetscCallCUDA(cudaMemcpy(a->j, thrust::raw_pointer_cast(d_j), a->nz * sizeof(PetscInt), cudaMemcpyDeviceToHost)); if (!a->ilen) PetscCall(PetscMalloc1(A->rmap->n, &a->ilen)); if (!a->imax) PetscCall(PetscMalloc1(A->rmap->n, &a->imax)); for (PetscInt i = 0; i < A->rmap->n; i++) { const PetscInt nnzr = a->i[i + 1] - a->i[i]; nzr += (PetscInt) !!(nnzr); a->ilen[i] = a->imax[i] = nnzr; a->rmax = PetscMax(a->rmax, nnzr); } a->nonzerorowcnt = nzr; A->preallocated = PETSC_TRUE; PetscCall(PetscLogGpuToCpu((A->rmap->n + a->nz) * sizeof(PetscInt))); PetscCall(MatMarkDiagonal_SeqAIJ(A)); if (free_raw_i) PetscCallCUDA(cudaFree(d_raw_i)); if (free_raw_j) PetscCallCUDA(cudaFree(d_raw_j)); } else { PetscCall(MatSeqAIJSetPreallocation(A, 0, NULL)); } PetscCall(MatSetOption(A, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_TRUE)); /* We want to allocate the CUSPARSE struct for matvec now. The code is so convoluted now that I prefer to copy zeros */ PetscCall(PetscArrayzero(a->a, a->nz)); PetscCall(MatCheckCompressedRow(A, nzr, &a->compressedrow, a->i, A->rmap->n, 0.6)); A->offloadmask = PETSC_OFFLOAD_CPU; PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_TRUE)); PetscFunctionReturn(PETSC_SUCCESS); } PetscErrorCode MatSetPreallocationCOO_SeqAIJCUSPARSE(Mat mat, PetscCount coo_n, PetscInt coo_i[], PetscInt coo_j[]) { Mat_SeqAIJ *seq; Mat_SeqAIJCUSPARSE *dev; PetscBool coo_basic = PETSC_TRUE; PetscMemType mtype = PETSC_MEMTYPE_DEVICE; PetscFunctionBegin; PetscCall(MatResetPreallocationCOO_SeqAIJ(mat)); PetscCall(MatResetPreallocationCOO_SeqAIJCUSPARSE(mat)); if (coo_i) { PetscCall(PetscGetMemType(coo_i, &mtype)); if (PetscMemTypeHost(mtype)) { for (PetscCount k = 0; k < coo_n; k++) { if (coo_i[k] < 0 || coo_j[k] < 0) { coo_basic = PETSC_FALSE; break; } } } } if (coo_basic) { /* i,j are on device or do not contain negative indices */ PetscCall(MatSetPreallocationCOO_SeqAIJCUSPARSE_Basic(mat, coo_n, coo_i, coo_j)); } else { PetscCall(MatSetPreallocationCOO_SeqAIJ(mat, coo_n, coo_i, coo_j)); mat->offloadmask = PETSC_OFFLOAD_CPU; PetscCall(MatSeqAIJCUSPARSECopyToGPU(mat)); seq = static_cast<Mat_SeqAIJ *>(mat->data); dev = static_cast<Mat_SeqAIJCUSPARSE *>(mat->spptr); PetscCallCUDA(cudaMalloc((void **)&dev->jmap_d, (seq->nz + 1) * sizeof(PetscCount))); PetscCallCUDA(cudaMemcpy(dev->jmap_d, seq->jmap, (seq->nz + 1) * sizeof(PetscCount), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMalloc((void **)&dev->perm_d, seq->Atot * sizeof(PetscCount))); PetscCallCUDA(cudaMemcpy(dev->perm_d, seq->perm, seq->Atot * sizeof(PetscCount), cudaMemcpyHostToDevice)); dev->use_extended_coo = PETSC_TRUE; } PetscFunctionReturn(PETSC_SUCCESS); } __global__ static void MatAddCOOValues(const PetscScalar kv[], PetscCount nnz, const PetscCount jmap[], const PetscCount perm[], InsertMode imode, PetscScalar a[]) { PetscCount i = blockIdx.x * blockDim.x + threadIdx.x; const PetscCount grid_size = gridDim.x * blockDim.x; for (; i < nnz; i += grid_size) { PetscScalar sum = 0.0; for 
(PetscCount k = jmap[i]; k < jmap[i + 1]; k++) sum += kv[perm[k]]; a[i] = (imode == INSERT_VALUES ? 0.0 : a[i]) + sum; } } PetscErrorCode MatSetValuesCOO_SeqAIJCUSPARSE(Mat A, const PetscScalar v[], InsertMode imode) { Mat_SeqAIJ *seq = (Mat_SeqAIJ *)A->data; Mat_SeqAIJCUSPARSE *dev = (Mat_SeqAIJCUSPARSE *)A->spptr; PetscCount Annz = seq->nz; PetscMemType memtype; const PetscScalar *v1 = v; PetscScalar *Aa; PetscFunctionBegin; if (dev->use_extended_coo) { PetscCall(PetscGetMemType(v, &memtype)); if (PetscMemTypeHost(memtype)) { /* If user gave v[] in host, we might need to copy it to device if any */ PetscCallCUDA(cudaMalloc((void **)&v1, seq->coo_n * sizeof(PetscScalar))); PetscCallCUDA(cudaMemcpy((void *)v1, v, seq->coo_n * sizeof(PetscScalar), cudaMemcpyHostToDevice)); } if (imode == INSERT_VALUES) PetscCall(MatSeqAIJCUSPARSEGetArrayWrite(A, &Aa)); else PetscCall(MatSeqAIJCUSPARSEGetArray(A, &Aa)); if (Annz) { MatAddCOOValues<<<(Annz + 255) / 256, 256>>>(v1, Annz, dev->jmap_d, dev->perm_d, imode, Aa); PetscCallCUDA(cudaPeekAtLastError()); } if (imode == INSERT_VALUES) PetscCall(MatSeqAIJCUSPARSERestoreArrayWrite(A, &Aa)); else PetscCall(MatSeqAIJCUSPARSERestoreArray(A, &Aa)); if (PetscMemTypeHost(memtype)) PetscCallCUDA(cudaFree((void *)v1)); } else { PetscCall(MatSetValuesCOO_SeqAIJCUSPARSE_Basic(A, v, imode)); } PetscFunctionReturn(PETSC_SUCCESS); } /*@C MatSeqAIJCUSPARSEGetIJ - returns the device row storage `i` and `j` indices for `MATSEQAIJCUSPARSE` matrices. Not Collective Input Parameters: + A - the matrix - compressed - `PETSC_TRUE` or `PETSC_FALSE` indicating the matrix data structure should be always returned in compressed form Output Parameters: + ia - the CSR row pointers - ja - the CSR column indices Level: developer Note: When compressed is true, the CSR structure does not contain empty rows .seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSERestoreIJ()`, `MatSeqAIJCUSPARSEGetArrayRead()` @*/ PetscErrorCode MatSeqAIJCUSPARSEGetIJ(Mat A, PetscBool compressed, const int **i, const int **j) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; CsrMatrix *csr; Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data; PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); if (!i || !j) PetscFunctionReturn(PETSC_SUCCESS); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); csr = (CsrMatrix *)cusp->mat->mat; if (i) { if (!compressed && a->compressedrow.use) { /* need full row offset */ if (!cusp->rowoffsets_gpu) { cusp->rowoffsets_gpu = new THRUSTINTARRAY32(A->rmap->n + 1); cusp->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1); PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt))); } *i = cusp->rowoffsets_gpu->data().get(); } else *i = csr->row_offsets->data().get(); } if (j) *j = csr->column_indices->data().get(); PetscFunctionReturn(PETSC_SUCCESS); } /*@C MatSeqAIJCUSPARSERestoreIJ - restore the device row storage `i` and `j` indices obtained with `MatSeqAIJCUSPARSEGetIJ()` Not Collective Input Parameters: + A - the matrix . compressed - `PETSC_TRUE` or `PETSC_FALSE` indicating the matrix data structure should be always returned in compressed form . 
ia - the CSR row pointers - ja - the CSR column indices Level: developer .seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetIJ()` @*/ PetscErrorCode MatSeqAIJCUSPARSERestoreIJ(Mat A, PetscBool, const int **i, const int **j) { PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); if (i) *i = NULL; if (j) *j = NULL; PetscFunctionReturn(PETSC_SUCCESS); } /*@C MatSeqAIJCUSPARSEGetArrayRead - gives read-only access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored Not Collective Input Parameter: . A - a `MATSEQAIJCUSPARSE` matrix Output Parameter: . a - pointer to the device data Level: developer Note: May trigger host-device copies if up-to-date matrix data is on host .seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()`, `MatSeqAIJCUSPARSEGetArrayWrite()`, `MatSeqAIJCUSPARSERestoreArrayRead()` @*/ PetscErrorCode MatSeqAIJCUSPARSEGetArrayRead(Mat A, const PetscScalar **a) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; CsrMatrix *csr; PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscValidPointer(a, 2); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); csr = (CsrMatrix *)cusp->mat->mat; PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory"); *a = csr->values->data().get(); PetscFunctionReturn(PETSC_SUCCESS); } /*@C MatSeqAIJCUSPARSERestoreArrayRead - restore the read-only access array obtained from `MatSeqAIJCUSPARSEGetArrayRead()` Not Collective Input Parameters: + A - a `MATSEQAIJCUSPARSE` matrix - a - pointer to the device data Level: developer .seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayRead()` @*/ PetscErrorCode MatSeqAIJCUSPARSERestoreArrayRead(Mat A, const PetscScalar **a) { PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscValidPointer(a, 2); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); *a = NULL; PetscFunctionReturn(PETSC_SUCCESS); } /*@C MatSeqAIJCUSPARSEGetArray - gives read-write access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored Not Collective Input Parameter: . A - a `MATSEQAIJCUSPARSE` matrix Output Parameter: . 
a - pointer to the device data Level: developer Note: May trigger host-device copies if up-to-date matrix data is on host .seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayRead()`, `MatSeqAIJCUSPARSEGetArrayWrite()`, `MatSeqAIJCUSPARSERestoreArray()` @*/ PetscErrorCode MatSeqAIJCUSPARSEGetArray(Mat A, PetscScalar **a) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; CsrMatrix *csr; PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscValidPointer(a, 2); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); csr = (CsrMatrix *)cusp->mat->mat; PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory"); *a = csr->values->data().get(); A->offloadmask = PETSC_OFFLOAD_GPU; PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE)); PetscFunctionReturn(PETSC_SUCCESS); } /*@C MatSeqAIJCUSPARSERestoreArray - restore the read-write access array obtained from `MatSeqAIJCUSPARSEGetArray()` Not Collective Input Parameters: + A - a `MATSEQAIJCUSPARSE` matrix - a - pointer to the device data Level: developer .seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()` @*/ PetscErrorCode MatSeqAIJCUSPARSERestoreArray(Mat A, PetscScalar **a) { PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscValidPointer(a, 2); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); PetscCall(MatSeqAIJInvalidateDiagonal(A)); PetscCall(PetscObjectStateIncrease((PetscObject)A)); *a = NULL; PetscFunctionReturn(PETSC_SUCCESS); } /*@C MatSeqAIJCUSPARSEGetArrayWrite - gives write access to the array where the device data for a `MATSEQAIJCUSPARSE` matrix is stored Not Collective Input Parameter: . A - a `MATSEQAIJCUSPARSE` matrix Output Parameter: . 
a - pointer to the device data Level: developer Note: Does not trigger host-device copies and flags data validity on the GPU .seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArray()`, `MatSeqAIJCUSPARSEGetArrayRead()`, `MatSeqAIJCUSPARSERestoreArrayWrite()` @*/ PetscErrorCode MatSeqAIJCUSPARSEGetArrayWrite(Mat A, PetscScalar **a) { Mat_SeqAIJCUSPARSE *cusp = (Mat_SeqAIJCUSPARSE *)A->spptr; CsrMatrix *csr; PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscValidPointer(a, 2); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); PetscCheck(cusp->format != MAT_CUSPARSE_ELL && cusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); PetscCheck(cusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); csr = (CsrMatrix *)cusp->mat->mat; PetscCheck(csr->values, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing CUDA memory"); *a = csr->values->data().get(); A->offloadmask = PETSC_OFFLOAD_GPU; PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(A, PETSC_FALSE)); PetscFunctionReturn(PETSC_SUCCESS); } /*@C MatSeqAIJCUSPARSERestoreArrayWrite - restore the write-only access array obtained from `MatSeqAIJCUSPARSEGetArrayWrite()` Not Collective Input Parameters: + A - a `MATSEQAIJCUSPARSE` matrix - a - pointer to the device data Level: developer .seealso: [](chapter_matrices), `Mat`, `MatSeqAIJCUSPARSEGetArrayWrite()` @*/ PetscErrorCode MatSeqAIJCUSPARSERestoreArrayWrite(Mat A, PetscScalar **a) { PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscValidPointer(a, 2); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); PetscCall(MatSeqAIJInvalidateDiagonal(A)); PetscCall(PetscObjectStateIncrease((PetscObject)A)); *a = NULL; PetscFunctionReturn(PETSC_SUCCESS); } struct IJCompare4 { __host__ __device__ inline bool operator()(const thrust::tuple<int, int, PetscScalar, int> &t1, const thrust::tuple<int, int, PetscScalar, int> &t2) { if (t1.get<0>() < t2.get<0>()) return true; if (t1.get<0>() == t2.get<0>()) return t1.get<1>() < t2.get<1>(); return false; } }; struct Shift { int _shift; Shift(int shift) : _shift(shift) { } __host__ __device__ inline int operator()(const int &c) { return c + _shift; } }; /* merges two SeqAIJCUSPARSE matrices A, B by concatenating their rows. 
[A';B']' operation in matlab notation */ PetscErrorCode MatSeqAIJCUSPARSEMergeMats(Mat A, Mat B, MatReuse reuse, Mat *C) { Mat_SeqAIJ *a = (Mat_SeqAIJ *)A->data, *b = (Mat_SeqAIJ *)B->data, *c; Mat_SeqAIJCUSPARSE *Acusp = (Mat_SeqAIJCUSPARSE *)A->spptr, *Bcusp = (Mat_SeqAIJCUSPARSE *)B->spptr, *Ccusp; Mat_SeqAIJCUSPARSEMultStruct *Cmat; CsrMatrix *Acsr, *Bcsr, *Ccsr; PetscInt Annz, Bnnz; cusparseStatus_t stat; PetscInt i, m, n, zero = 0; PetscFunctionBegin; PetscValidHeaderSpecific(A, MAT_CLASSID, 1); PetscValidHeaderSpecific(B, MAT_CLASSID, 2); PetscValidPointer(C, 4); PetscCheckTypeName(A, MATSEQAIJCUSPARSE); PetscCheckTypeName(B, MATSEQAIJCUSPARSE); PetscCheck(A->rmap->n == B->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Invalid number or rows %" PetscInt_FMT " != %" PetscInt_FMT, A->rmap->n, B->rmap->n); PetscCheck(reuse != MAT_INPLACE_MATRIX, PETSC_COMM_SELF, PETSC_ERR_SUP, "MAT_INPLACE_MATRIX not supported"); PetscCheck(Acusp->format != MAT_CUSPARSE_ELL && Acusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); PetscCheck(Bcusp->format != MAT_CUSPARSE_ELL && Bcusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); if (reuse == MAT_INITIAL_MATRIX) { m = A->rmap->n; n = A->cmap->n + B->cmap->n; PetscCall(MatCreate(PETSC_COMM_SELF, C)); PetscCall(MatSetSizes(*C, m, n, m, n)); PetscCall(MatSetType(*C, MATSEQAIJCUSPARSE)); c = (Mat_SeqAIJ *)(*C)->data; Ccusp = (Mat_SeqAIJCUSPARSE *)(*C)->spptr; Cmat = new Mat_SeqAIJCUSPARSEMultStruct; Ccsr = new CsrMatrix; Cmat->cprowIndices = NULL; c->compressedrow.use = PETSC_FALSE; c->compressedrow.nrows = 0; c->compressedrow.i = NULL; c->compressedrow.rindex = NULL; Ccusp->workVector = NULL; Ccusp->nrows = m; Ccusp->mat = Cmat; Ccusp->mat->mat = Ccsr; Ccsr->num_rows = m; Ccsr->num_cols = n; PetscCallCUSPARSE(cusparseCreateMatDescr(&Cmat->descr)); PetscCallCUSPARSE(cusparseSetMatIndexBase(Cmat->descr, CUSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(cusparseSetMatType(Cmat->descr, CUSPARSE_MATRIX_TYPE_GENERAL)); PetscCallCUDA(cudaMalloc((void **)&(Cmat->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(Cmat->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(Cmat->beta_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMemcpy(Cmat->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(Cmat->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(Cmat->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(B)); PetscCheck(Acusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); PetscCheck(Bcusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); Acsr = (CsrMatrix *)Acusp->mat->mat; Bcsr = (CsrMatrix *)Bcusp->mat->mat; Annz = (PetscInt)Acsr->column_indices->size(); Bnnz = (PetscInt)Bcsr->column_indices->size(); c->nz = Annz + Bnnz; Ccsr->row_offsets = new THRUSTINTARRAY32(m + 1); Ccsr->column_indices = new THRUSTINTARRAY32(c->nz); Ccsr->values = new THRUSTARRAY(c->nz); Ccsr->num_entries = c->nz; Ccusp->cooPerm = new THRUSTINTARRAY(c->nz); if (c->nz) { auto Acoo = new THRUSTINTARRAY32(Annz); auto Bcoo = new THRUSTINTARRAY32(Bnnz); auto Ccoo = new THRUSTINTARRAY32(c->nz); THRUSTINTARRAY32 *Aroff, *Broff; if (a->compressedrow.use) { /* need full row offset */ if (!Acusp->rowoffsets_gpu) { Acusp->rowoffsets_gpu = 
new THRUSTINTARRAY32(A->rmap->n + 1); Acusp->rowoffsets_gpu->assign(a->i, a->i + A->rmap->n + 1); PetscCall(PetscLogCpuToGpu((A->rmap->n + 1) * sizeof(PetscInt))); } Aroff = Acusp->rowoffsets_gpu; } else Aroff = Acsr->row_offsets; if (b->compressedrow.use) { /* need full row offset */ if (!Bcusp->rowoffsets_gpu) { Bcusp->rowoffsets_gpu = new THRUSTINTARRAY32(B->rmap->n + 1); Bcusp->rowoffsets_gpu->assign(b->i, b->i + B->rmap->n + 1); PetscCall(PetscLogCpuToGpu((B->rmap->n + 1) * sizeof(PetscInt))); } Broff = Bcusp->rowoffsets_gpu; } else Broff = Bcsr->row_offsets; PetscCall(PetscLogGpuTimeBegin()); stat = cusparseXcsr2coo(Acusp->handle, Aroff->data().get(), Annz, m, Acoo->data().get(), CUSPARSE_INDEX_BASE_ZERO); PetscCallCUSPARSE(stat); stat = cusparseXcsr2coo(Bcusp->handle, Broff->data().get(), Bnnz, m, Bcoo->data().get(), CUSPARSE_INDEX_BASE_ZERO); PetscCallCUSPARSE(stat); /* Issues when using bool with large matrices on SUMMIT 10.2.89 */ auto Aperm = thrust::make_constant_iterator(1); auto Bperm = thrust::make_constant_iterator(0); #if PETSC_PKG_CUDA_VERSION_GE(10, 0, 0) auto Bcib = thrust::make_transform_iterator(Bcsr->column_indices->begin(), Shift(A->cmap->n)); auto Bcie = thrust::make_transform_iterator(Bcsr->column_indices->end(), Shift(A->cmap->n)); #else /* there are issues instantiating the merge operation using a transform iterator for the columns of B */ auto Bcib = Bcsr->column_indices->begin(); auto Bcie = Bcsr->column_indices->end(); thrust::transform(Bcib, Bcie, Bcib, Shift(A->cmap->n)); #endif auto wPerm = new THRUSTINTARRAY32(Annz + Bnnz); auto Azb = thrust::make_zip_iterator(thrust::make_tuple(Acoo->begin(), Acsr->column_indices->begin(), Acsr->values->begin(), Aperm)); auto Aze = thrust::make_zip_iterator(thrust::make_tuple(Acoo->end(), Acsr->column_indices->end(), Acsr->values->end(), Aperm)); auto Bzb = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->begin(), Bcib, Bcsr->values->begin(), Bperm)); auto Bze = thrust::make_zip_iterator(thrust::make_tuple(Bcoo->end(), Bcie, Bcsr->values->end(), Bperm)); auto Czb = thrust::make_zip_iterator(thrust::make_tuple(Ccoo->begin(), Ccsr->column_indices->begin(), Ccsr->values->begin(), wPerm->begin())); auto p1 = Ccusp->cooPerm->begin(); auto p2 = Ccusp->cooPerm->begin(); thrust::advance(p2, Annz); PetscCallThrust(thrust::merge(thrust::device, Azb, Aze, Bzb, Bze, Czb, IJCompare4())); #if PETSC_PKG_CUDA_VERSION_LT(10, 0, 0) thrust::transform(Bcib, Bcie, Bcib, Shift(-A->cmap->n)); #endif auto cci = thrust::make_counting_iterator(zero); auto cce = thrust::make_counting_iterator(c->nz); #if 0 //Errors on SUMMIT cuda 11.1.0 PetscCallThrust(thrust::partition_copy(thrust::device,cci,cce,wPerm->begin(),p1,p2,thrust::identity<int>())); #else auto pred = thrust::identity<int>(); PetscCallThrust(thrust::copy_if(thrust::device, cci, cce, wPerm->begin(), p1, pred)); PetscCallThrust(thrust::remove_copy_if(thrust::device, cci, cce, wPerm->begin(), p2, pred)); #endif stat = cusparseXcoo2csr(Ccusp->handle, Ccoo->data().get(), c->nz, m, Ccsr->row_offsets->data().get(), CUSPARSE_INDEX_BASE_ZERO); PetscCallCUSPARSE(stat); PetscCall(PetscLogGpuTimeEnd()); delete wPerm; delete Acoo; delete Bcoo; delete Ccoo; #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) stat = cusparseCreateCsr(&Cmat->matDescr, Ccsr->num_rows, Ccsr->num_cols, Ccsr->num_entries, Ccsr->row_offsets->data().get(), Ccsr->column_indices->data().get(), Ccsr->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); 
#endif if (A->form_explicit_transpose && B->form_explicit_transpose) { /* if A and B have the transpose, generate C transpose too */ PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(A)); PetscCall(MatSeqAIJCUSPARSEFormExplicitTranspose(B)); PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE; Mat_SeqAIJCUSPARSEMultStruct *CmatT = new Mat_SeqAIJCUSPARSEMultStruct; CsrMatrix *CcsrT = new CsrMatrix; CsrMatrix *AcsrT = AT ? (CsrMatrix *)Acusp->matTranspose->mat : NULL; CsrMatrix *BcsrT = BT ? (CsrMatrix *)Bcusp->matTranspose->mat : NULL; (*C)->form_explicit_transpose = PETSC_TRUE; (*C)->transupdated = PETSC_TRUE; Ccusp->rowoffsets_gpu = NULL; CmatT->cprowIndices = NULL; CmatT->mat = CcsrT; CcsrT->num_rows = n; CcsrT->num_cols = m; CcsrT->num_entries = c->nz; CcsrT->row_offsets = new THRUSTINTARRAY32(n + 1); CcsrT->column_indices = new THRUSTINTARRAY32(c->nz); CcsrT->values = new THRUSTARRAY(c->nz); PetscCall(PetscLogGpuTimeBegin()); auto rT = CcsrT->row_offsets->begin(); if (AT) { rT = thrust::copy(AcsrT->row_offsets->begin(), AcsrT->row_offsets->end(), rT); thrust::advance(rT, -1); } if (BT) { auto titb = thrust::make_transform_iterator(BcsrT->row_offsets->begin(), Shift(a->nz)); auto tite = thrust::make_transform_iterator(BcsrT->row_offsets->end(), Shift(a->nz)); thrust::copy(titb, tite, rT); } auto cT = CcsrT->column_indices->begin(); if (AT) cT = thrust::copy(AcsrT->column_indices->begin(), AcsrT->column_indices->end(), cT); if (BT) thrust::copy(BcsrT->column_indices->begin(), BcsrT->column_indices->end(), cT); auto vT = CcsrT->values->begin(); if (AT) vT = thrust::copy(AcsrT->values->begin(), AcsrT->values->end(), vT); if (BT) thrust::copy(BcsrT->values->begin(), BcsrT->values->end(), vT); PetscCall(PetscLogGpuTimeEnd()); PetscCallCUSPARSE(cusparseCreateMatDescr(&CmatT->descr)); PetscCallCUSPARSE(cusparseSetMatIndexBase(CmatT->descr, CUSPARSE_INDEX_BASE_ZERO)); PetscCallCUSPARSE(cusparseSetMatType(CmatT->descr, CUSPARSE_MATRIX_TYPE_GENERAL)); PetscCallCUDA(cudaMalloc((void **)&(CmatT->alpha_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(CmatT->beta_zero), sizeof(PetscScalar))); PetscCallCUDA(cudaMalloc((void **)&(CmatT->beta_one), sizeof(PetscScalar))); PetscCallCUDA(cudaMemcpy(CmatT->alpha_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(CmatT->beta_zero, &PETSC_CUSPARSE_ZERO, sizeof(PetscScalar), cudaMemcpyHostToDevice)); PetscCallCUDA(cudaMemcpy(CmatT->beta_one, &PETSC_CUSPARSE_ONE, sizeof(PetscScalar), cudaMemcpyHostToDevice)); #if PETSC_PKG_CUDA_VERSION_GE(11, 0, 0) stat = cusparseCreateCsr(&CmatT->matDescr, CcsrT->num_rows, CcsrT->num_cols, CcsrT->num_entries, CcsrT->row_offsets->data().get(), CcsrT->column_indices->data().get(), CcsrT->values->data().get(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, cusparse_scalartype); PetscCallCUSPARSE(stat); #endif Ccusp->matTranspose = CmatT; } } c->singlemalloc = PETSC_FALSE; c->free_a = PETSC_TRUE; c->free_ij = PETSC_TRUE; PetscCall(PetscMalloc1(m + 1, &c->i)); PetscCall(PetscMalloc1(c->nz, &c->j)); if (PetscDefined(USE_64BIT_INDICES)) { /* 32 to 64 bit conversion on the GPU and then copy to host (lazy) */ THRUSTINTARRAY ii(Ccsr->row_offsets->size()); THRUSTINTARRAY jj(Ccsr->column_indices->size()); ii = *Ccsr->row_offsets; jj = *Ccsr->column_indices; PetscCallCUDA(cudaMemcpy(c->i, ii.data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost)); 
PetscCallCUDA(cudaMemcpy(c->j, jj.data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost)); } else { PetscCallCUDA(cudaMemcpy(c->i, Ccsr->row_offsets->data().get(), Ccsr->row_offsets->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost)); PetscCallCUDA(cudaMemcpy(c->j, Ccsr->column_indices->data().get(), Ccsr->column_indices->size() * sizeof(PetscInt), cudaMemcpyDeviceToHost)); } PetscCall(PetscLogGpuToCpu((Ccsr->column_indices->size() + Ccsr->row_offsets->size()) * sizeof(PetscInt))); PetscCall(PetscMalloc1(m, &c->ilen)); PetscCall(PetscMalloc1(m, &c->imax)); c->maxnz = c->nz; c->nonzerorowcnt = 0; c->rmax = 0; for (i = 0; i < m; i++) { const PetscInt nn = c->i[i + 1] - c->i[i]; c->ilen[i] = c->imax[i] = nn; c->nonzerorowcnt += (PetscInt) !!nn; c->rmax = PetscMax(c->rmax, nn); } PetscCall(MatMarkDiagonal_SeqAIJ(*C)); PetscCall(PetscMalloc1(c->nz, &c->a)); (*C)->nonzerostate++; PetscCall(PetscLayoutSetUp((*C)->rmap)); PetscCall(PetscLayoutSetUp((*C)->cmap)); Ccusp->nonzerostate = (*C)->nonzerostate; (*C)->preallocated = PETSC_TRUE; } else { PetscCheck((*C)->rmap->n == B->rmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Invalid number or rows %" PetscInt_FMT " != %" PetscInt_FMT, (*C)->rmap->n, B->rmap->n); c = (Mat_SeqAIJ *)(*C)->data; if (c->nz) { Ccusp = (Mat_SeqAIJCUSPARSE *)(*C)->spptr; PetscCheck(Ccusp->cooPerm, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing cooPerm"); PetscCheck(Ccusp->format != MAT_CUSPARSE_ELL && Ccusp->format != MAT_CUSPARSE_HYB, PETSC_COMM_SELF, PETSC_ERR_SUP, "Not implemented"); PetscCheck(Ccusp->nonzerostate == (*C)->nonzerostate, PETSC_COMM_SELF, PETSC_ERR_COR, "Wrong nonzerostate"); PetscCall(MatSeqAIJCUSPARSECopyToGPU(A)); PetscCall(MatSeqAIJCUSPARSECopyToGPU(B)); PetscCheck(Acusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); PetscCheck(Bcusp->mat, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing Mat_SeqAIJCUSPARSEMultStruct"); Acsr = (CsrMatrix *)Acusp->mat->mat; Bcsr = (CsrMatrix *)Bcusp->mat->mat; Ccsr = (CsrMatrix *)Ccusp->mat->mat; PetscCheck(Acsr->num_entries == (PetscInt)Acsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "A nnz %" PetscInt_FMT " != %" PetscInt_FMT, Acsr->num_entries, (PetscInt)Acsr->values->size()); PetscCheck(Bcsr->num_entries == (PetscInt)Bcsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "B nnz %" PetscInt_FMT " != %" PetscInt_FMT, Bcsr->num_entries, (PetscInt)Bcsr->values->size()); PetscCheck(Ccsr->num_entries == (PetscInt)Ccsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "C nnz %" PetscInt_FMT " != %" PetscInt_FMT, Ccsr->num_entries, (PetscInt)Ccsr->values->size()); PetscCheck(Ccsr->num_entries == Acsr->num_entries + Bcsr->num_entries, PETSC_COMM_SELF, PETSC_ERR_COR, "C nnz %" PetscInt_FMT " != %" PetscInt_FMT " + %" PetscInt_FMT, Ccsr->num_entries, Acsr->num_entries, Bcsr->num_entries); PetscCheck(Ccusp->cooPerm->size() == Ccsr->values->size(), PETSC_COMM_SELF, PETSC_ERR_COR, "permSize %" PetscInt_FMT " != %" PetscInt_FMT, (PetscInt)Ccusp->cooPerm->size(), (PetscInt)Ccsr->values->size()); auto pmid = Ccusp->cooPerm->begin(); thrust::advance(pmid, Acsr->num_entries); PetscCall(PetscLogGpuTimeBegin()); auto zibait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(), Ccusp->cooPerm->begin()))); auto zieait = thrust::make_zip_iterator(thrust::make_tuple(Acsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(), pmid))); thrust::for_each(zibait, zieait, VecCUDAEquals()); auto zibbit = 
thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->begin(), thrust::make_permutation_iterator(Ccsr->values->begin(), pmid))); auto ziebit = thrust::make_zip_iterator(thrust::make_tuple(Bcsr->values->end(), thrust::make_permutation_iterator(Ccsr->values->begin(), Ccusp->cooPerm->end()))); thrust::for_each(zibbit, ziebit, VecCUDAEquals()); PetscCall(MatSeqAIJCUSPARSEInvalidateTranspose(*C, PETSC_FALSE)); if (A->form_explicit_transpose && B->form_explicit_transpose && (*C)->form_explicit_transpose) { PetscCheck(Ccusp->matTranspose, PETSC_COMM_SELF, PETSC_ERR_COR, "Missing transpose Mat_SeqAIJCUSPARSEMultStruct"); PetscBool AT = Acusp->matTranspose ? PETSC_TRUE : PETSC_FALSE, BT = Bcusp->matTranspose ? PETSC_TRUE : PETSC_FALSE; CsrMatrix *AcsrT = AT ? (CsrMatrix *)Acusp->matTranspose->mat : NULL; CsrMatrix *BcsrT = BT ? (CsrMatrix *)Bcusp->matTranspose->mat : NULL; CsrMatrix *CcsrT = (CsrMatrix *)Ccusp->matTranspose->mat; auto vT = CcsrT->values->begin(); if (AT) vT = thrust::copy(AcsrT->values->begin(), AcsrT->values->end(), vT); if (BT) thrust::copy(BcsrT->values->begin(), BcsrT->values->end(), vT); (*C)->transupdated = PETSC_TRUE; } PetscCall(PetscLogGpuTimeEnd()); } } PetscCall(PetscObjectStateIncrease((PetscObject)*C)); (*C)->assembled = PETSC_TRUE; (*C)->was_assembled = PETSC_FALSE; (*C)->offloadmask = PETSC_OFFLOAD_GPU; PetscFunctionReturn(PETSC_SUCCESS); } static PetscErrorCode MatSeqAIJCopySubArray_SeqAIJCUSPARSE(Mat A, PetscInt n, const PetscInt idx[], PetscScalar v[]) { bool dmem; const PetscScalar *av; PetscFunctionBegin; dmem = isCudaMem(v); PetscCall(MatSeqAIJCUSPARSEGetArrayRead(A, &av)); if (n && idx) { THRUSTINTARRAY widx(n); widx.assign(idx, idx + n); PetscCall(PetscLogCpuToGpu(n * sizeof(PetscInt))); THRUSTARRAY *w = NULL; thrust::device_ptr<PetscScalar> dv; if (dmem) { dv = thrust::device_pointer_cast(v); } else { w = new THRUSTARRAY(n); dv = w->data(); } thrust::device_ptr<const PetscScalar> dav = thrust::device_pointer_cast(av); auto zibit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav, widx.begin()), dv)); auto zieit = thrust::make_zip_iterator(thrust::make_tuple(thrust::make_permutation_iterator(dav, widx.end()), dv + n)); thrust::for_each(zibit, zieit, VecCUDAEquals()); if (w) PetscCallCUDA(cudaMemcpy(v, w->data().get(), n * sizeof(PetscScalar), cudaMemcpyDeviceToHost)); delete w; } else { PetscCallCUDA(cudaMemcpy(v, av, n * sizeof(PetscScalar), dmem ? cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost)); } if (!dmem) PetscCall(PetscLogCpuToGpu(n * sizeof(PetscScalar))); PetscCall(MatSeqAIJCUSPARSERestoreArrayRead(A, &av)); PetscFunctionReturn(PETSC_SUCCESS); }
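/*
   A minimal usage sketch of the COO assembly path implemented above, driven through
   PETSc's public MatSetPreallocationCOO()/MatSetValuesCOO() interface. This is
   illustrative only and is not taken from the PETSc source: the 2x2 pattern, the values
   and the helper name AssembleCOOExample are invented for the example, and the code
   relies on the PETSc headers already included by this file.
*/
static PetscErrorCode AssembleCOOExample(void)
{
  Mat         A;
  PetscInt    coo_i[] = {0, 0, 1, 1}; /* row indices of the nonzeros */
  PetscInt    coo_j[] = {0, 1, 0, 1}; /* column indices of the nonzeros */
  PetscScalar coo_v[] = {1.0, 2.0, 3.0, 4.0};

  PetscFunctionBegin;
  PetscCall(MatCreate(PETSC_COMM_SELF, &A));
  PetscCall(MatSetSizes(A, 2, 2, 2, 2));
  PetscCall(MatSetType(A, MATSEQAIJCUSPARSE));
  /* hand the sparsity pattern to the COO machinery; with non-negative (and, for device
     arrays, GPU-resident) indices this reaches MatSetPreallocationCOO_SeqAIJCUSPARSE_Basic() */
  PetscCall(MatSetPreallocationCOO(A, 4, coo_i, coo_j));
  /* repeated (i,j) entries, if any, are summed on the device when the values are set */
  PetscCall(MatSetValuesCOO(A, coo_v, INSERT_VALUES));
  PetscCall(MatDestroy(&A));
  PetscFunctionReturn(PETSC_SUCCESS);
}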
a69d80ac3c572514ecfbe32236f1853d29897508.hip
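// A plain C++ reference sketch (not part of the hipified extension below) of the forward
// computation that the backward kernels in this file appear to differentiate, reconstructed
// from the gradient code. For each pixel, the per-triangle minimum L1 distance computed by
// distance() below is given a positive sign inside the triangle and a negative sign outside,
// scaled by 1/sigma, and turned into softmax weights w_k; the pixel's "variance" is
// sum_k w_k * ||grid_fea_k - img_fea||^2 and its reconstruction is sum_k w_k * grid_fea_k.
// The function and variable names here are editorial assumptions.
#include <cmath>
#include <limits>
#include <vector>

static void softAssignForwardReference(const std::vector<float> &signed_dist,           // one signed distance per triangle
                                       const std::vector<std::vector<float>> &grid_fea, // per-triangle feature vectors
                                       const std::vector<float> &img_fea,               // this pixel's feature vector
                                       float sigma,
                                       std::vector<float> &reconstruct,
                                       float &variance) {
  const int K = static_cast<int>(signed_dist.size());
  const int D = static_cast<int>(img_fea.size());
  // numerically stable softmax over signed_dist[k] / sigma
  // (mirrors the blockReduceMax / exp / blockReduceSum pipeline used below)
  float maxv = -std::numeric_limits<float>::max();
  for (int k = 0; k < K; ++k) maxv = (signed_dist[k] / sigma > maxv) ? signed_dist[k] / sigma : maxv;
  std::vector<float> w(K);
  float sum = 0.f;
  for (int k = 0; k < K; ++k) { w[k] = std::exp(signed_dist[k] / sigma - maxv); sum += w[k]; }
  for (int k = 0; k < K; ++k) w[k] /= sum;
  // weighted feature reconstruction and squared-difference "variance" for this pixel
  reconstruct.assign(D, 0.f);
  variance = 0.f;
  for (int k = 0; k < K; ++k) {
    float sq = 0.f;
    for (int d = 0; d < D; ++d) {
      reconstruct[d] += w[k] * grid_fea[k][d];
      const float diff = grid_fea[k][d] - img_fea[d];
      sq += diff * diff;
    }
    variance += w[k] * sq;
  }
}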
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <assert.h> #include <THH/THH.h> #include <vector> #include <torch/torch.h> #include <torch/extension.h> #define eps 1e-10 #define SCALE 1.0 #define MAX_DIS 9999999999.0 #include <sys/time.h> static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_abs(scalar_t a){ if (a > 0.0){ return a; } else{ return -a; } } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_sign(scalar_t a){ if (a > 0.0){ return 1; } else if (a == 0.0){ return 0; } else{ return -1; } } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_square(scalar_t a){ return a * a; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_min_dis(scalar_t a, scalar_t b, scalar_t c){ scalar_t min_d = a; if (b < min_d){ min_d = b; } if (c < min_d){ min_d = c; } return min_d; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_min_dis_idx(scalar_t a, scalar_t b, scalar_t c){ scalar_t min_d = a; int min_idx = 0; if (b < min_d){ min_d = b; min_idx = 1; } if (c < min_d){ min_d = c; min_idx = 2; } return min_idx; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_divide_non_zero(scalar_t a){ if (a == 0){ return eps; } if (a < 0){ return a - eps; } if (a > 0){ return a + eps; } return eps; } template <typename scalar_t> __host__ __device__ scalar_t distance_line(scalar_t x1, scalar_t y1, scalar_t x2, scalar_t y2, scalar_t x, scalar_t y){ scalar_t dx1x2 = -x1 + x2; scalar_t dy1y2 = -y1 + y2; scalar_t dx1x = x - x1; scalar_t dy1y = y - y1; scalar_t c1 = - x * x1 + x * x2 + x1 * x1 - x1 * x2 - y * y1 + y * y2 + y1 * y1 - y1 * y2; scalar_t c2 = x1 * x1 - 2 * x1 * x2 + x2 * x2 + y1 * y1 - 2 * y1 * y2 + y2 * y2; scalar_t d1 = -dx1x + dx1x2 * c1 / line_variance_parallel_cuda_divide_non_zero(c2); scalar_t d2 = -dy1y + dy1y2 * c1 / line_variance_parallel_cuda_divide_non_zero(c2); scalar_t dis = line_variance_parallel_cuda_abs(d1) + line_variance_parallel_cuda_abs(d2); return dis; } template <typename scalar_t> __host__ __device__ scalar_t distance_point(scalar_t x1, scalar_t y1, scalar_t x, scalar_t y){ return line_variance_parallel_cuda_abs(x - x1) + line_variance_parallel_cuda_abs(y - y1); } template <typename scalar_t> __host__ __device__ void cal_line_gradient(scalar_t* grad, scalar_t x1, scalar_t y1, scalar_t x2, scalar_t y2, scalar_t x, scalar_t y){ scalar_t dx1x2 = -x1 + x2; scalar_t dy1y2 = -y1 + y2; scalar_t dx1x = x - x1; scalar_t dy1y = y - y1; scalar_t c1 = - x * x1 + x * x2 + x1 * x1 - x1 * x2 - y * y1 + y * y2 + y1 * y1 - y1 * y2; scalar_t c2 = x1 * x1 - 2 * x1 * x2 + x2 * x2 + y1 * y1 - 2 * y1 * y2 + y2 * y2; scalar_t c12 = c1 / line_variance_parallel_cuda_divide_non_zero(c2 * c2); scalar_t cx = - dx1x - dx1x2; scalar_t cy = - dy1y - dy1y2; scalar_t d1 = - dx1x + dx1x2 * c1 / line_variance_parallel_cuda_divide_non_zero(c2); scalar_t d2 = - dy1y + dy1y2 * c1 / line_variance_parallel_cuda_divide_non_zero(c2); //scalar_t dis = line_variance_parallel_cuda_abs(d1) + line_variance_parallel_cuda_abs(d2); scalar_t dif_x1 = (2 * dx1x2 * dy1y2 * 
c12 + dy1y2 * cx / line_variance_parallel_cuda_divide_non_zero(c2)) * line_variance_parallel_cuda_sign(d2) + (2 * dx1x2 * dx1x2 * c12 + dx1x2 * cx / line_variance_parallel_cuda_divide_non_zero(c2) + 1 - c1 / line_variance_parallel_cuda_divide_non_zero(c2)) * line_variance_parallel_cuda_sign(d1); scalar_t dif_y1 = (2 * dx1x2 * dy1y2 * c12 + dx1x2 * cy / line_variance_parallel_cuda_divide_non_zero(c2)) * line_variance_parallel_cuda_sign(d1) + (2 * dy1y2 * dy1y2 * c12 + dy1y2 * cy / line_variance_parallel_cuda_divide_non_zero(c2) + 1 - c1 / line_variance_parallel_cuda_divide_non_zero(c2)) * line_variance_parallel_cuda_sign(d2); scalar_t dif_x2 = (dx1x * dy1y2 / line_variance_parallel_cuda_divide_non_zero(c2) - 2 * dx1x2 * dy1y2 * c12) * line_variance_parallel_cuda_sign(d2) + (dx1x * dx1x2 / line_variance_parallel_cuda_divide_non_zero(c2) - 2 * dx1x2 * dx1x2 * c12 + c1 / line_variance_parallel_cuda_divide_non_zero(c2)) * line_variance_parallel_cuda_sign(d1); scalar_t dif_y2 = (dx1x2 * dy1y / line_variance_parallel_cuda_divide_non_zero(c2) - 2 * dx1x2 * dy1y2 * c12) * line_variance_parallel_cuda_sign(d1) + (dy1y * dy1y2 / line_variance_parallel_cuda_divide_non_zero(c2) - 2 * dy1y2 * dy1y2 * c12 + c1 / line_variance_parallel_cuda_divide_non_zero(c2)) * line_variance_parallel_cuda_sign(d2); grad[0] = dif_x1; grad[1] = dif_y1; grad[2] = dif_x2; grad[3] = dif_y2; } template <typename scalar_t> __host__ __device__ void distance(scalar_t* ret, scalar_t x1, scalar_t y1, scalar_t x2, scalar_t y2, scalar_t x3, scalar_t y3, scalar_t x, scalar_t y) { //https://en.wikipedia.org/wiki/Barycentric_coordinate_system scalar_t x1_x2 = x1 - x2; scalar_t y1_y2 = y1 - y2; scalar_t x1_x3 = x1 - x3; scalar_t y1_y3 = y1 - y3; scalar_t x2_x3 = x2 - x3; scalar_t y2_y3 = y2 - y3; scalar_t x_x1 = x - x1; scalar_t y_y1 = y - y1; scalar_t x_x2 = x - x2; scalar_t y_y2 = y - y2; scalar_t x_x3 = x - x3; scalar_t y_y3 = y - y3; scalar_t k1 = y2_y3 * x_x3 - x2_x3 * y_y3; scalar_t k2 = x1_x3 * y_y3 - y1_y3 * x_x3; scalar_t k3 = y2_y3 * x1_x3 - x2_x3 * y1_y3; if(k3 == 0){ // not a legal triangle ret[0] = -2; return; } if(k3 > 0){ // clock-wise triangle ret[0] = -1; return; } scalar_t l1 = k1 / k3; scalar_t l2 = k2 / k3; scalar_t l3 = 1 - l1 - l2; scalar_t dis12 = distance_line(x1, y1, x2, y2, x, y); scalar_t dis23 = distance_line(x2, y2, x3, y3, x, y); scalar_t dis13 = distance_line(x1, y1, x3, y3, x, y); if (l1 >= 0 && l2 >= 0 && l3 >= 0){ // lie inside or on the boundary ret[0] = 0; scalar_t min_dis_line = line_variance_parallel_cuda_min_dis(dis12, dis23, dis13); scalar_t min_dis_line_idx = line_variance_parallel_cuda_min_dis_idx(dis12, dis23, dis13); ret[1] = min_dis_line; ret[2] = min_dis_line_idx; return; } // whether point can calculate distance to certain line bool within12 = ((y1_y2 * y_y1 + x_x1 * x1_x2) * (y1_y2 * y_y2 + x_x2 * x1_x2)) <= 0; bool within23 = ((y2_y3 * y_y3 + x_x3 * x2_x3) * (y2_y3 * y_y2 + x_x2 * x2_x3)) <= 0; bool within13 = ((y1_y3 * y_y1 + x_x1 * x1_x3) * (y1_y3 * y_y3 + x_x3 * x1_x3)) <= 0; dis12 = within12 ? dis12 : MAX_DIS; dis23 = within23 ? dis23 : MAX_DIS; dis13 = within13 ? 
dis13 : MAX_DIS; scalar_t min_dis_line = line_variance_parallel_cuda_min_dis(dis12, dis23, dis13); scalar_t min_dis_line_idx = line_variance_parallel_cuda_min_dis_idx(dis12, dis23, dis13); scalar_t d1 = distance_point(x1, y1, x, y); scalar_t d2 = distance_point(x2, y2, x, y); scalar_t d3 = distance_point(x3, y3, x, y); scalar_t min_dis_point = line_variance_parallel_cuda_min_dis(d1, d2, d3); scalar_t min_dis_point_idx = line_variance_parallel_cuda_min_dis_idx(d1, d2, d3); if (min_dis_line < min_dis_point){ //distance to line ret[0] = 1; ret[1] = min_dis_line; ret[2] = min_dis_line_idx; } else{ //distance to point ret[0] = 2; ret[1] = min_dis_point; ret[2] = min_dis_point_idx; } } template<typename scalar_t> __global__ void line_variance_parallel_cuda_backword_kernel_batch( const torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> dldvariance_bxn, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grid_bxkx3x2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_pos_bxnx2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> dldreconstruct_bxnxd, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, scalar_t* __restrict__ dldgrid_bxkx3x2, int bnum, int n_pixel, int n_grid, int d_fea, float sigma) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int pixel_idx = presentthread % n_pixel; int bidx = (presentthread - pixel_idx) / n_pixel; if (bidx >= bnum || pixel_idx >= n_pixel) return; int total_idx = bidx * n_pixel * n_grid + pixel_idx * n_grid; scalar_t pixel_x = img_pos_bxnx2[bidx][pixel_idx][0]; scalar_t pixel_y = img_pos_bxnx2[bidx][pixel_idx][1]; scalar_t x0 = pixel_x * SCALE; scalar_t y0 = pixel_y * SCALE; scalar_t x1, y1, x2, y2; // tmp variable for calculating the gradients scalar_t min_distance = 0.0; scalar_t sum_exp = 0.0; int min_distance_idx = 0; int idx_one = 0; int idx_two = 0; scalar_t find_sign = 0.0; scalar_t max_dist = -MAX_DIS; scalar_t ax, ay, bx, by, cx, cy; int img_pos_total_idx = bidx * n_pixel * 2 + pixel_idx * 2; scalar_t ret[3] = {0}; scalar_t grad[4] = {0}; scalar_t condition; for (int grididx = 0; grididx < n_grid; grididx++){ ax = grid_bxkx3x2[bidx][grididx][0][0] * SCALE; ay = grid_bxkx3x2[bidx][grididx][0][1] * SCALE; bx = grid_bxkx3x2[bidx][grididx][1][0] * SCALE; by = grid_bxkx3x2[bidx][grididx][1][1] * SCALE; cx = grid_bxkx3x2[bidx][grididx][2][0] * SCALE; cy = grid_bxkx3x2[bidx][grididx][2][1] * SCALE; distance(ret, ax, ay, bx, by, cx, cy, x0, y0); condition = ret[0]; min_distance = ret[1]; if (condition < 0) { min_distance = - MAX_DIS; } else if (condition == 0 && find_sign ==0){ min_distance = min_distance / sigma; find_sign == 1; } else{ min_distance = - min_distance / sigma; } max_dist = max_dist > min_distance ? 
max_dist : min_distance; buffer_bxnxk[bidx][pixel_idx][grididx] = min_distance; } for (int grididx = 0; grididx < n_grid; grididx++){ buffer_bxnxk[bidx][pixel_idx][grididx] = expf(buffer_bxnxk[bidx][pixel_idx][grididx] - max_dist); sum_exp += buffer_bxnxk[bidx][pixel_idx][grididx]; } scalar_t sum_gradient = 0.0; scalar_t pixel_f = 0.0; scalar_t grid_f = 0.0; scalar_t diff = 0.0; scalar_t difference = 0.0; scalar_t grid_f_sum = 0.0; for (int grididx = 0; grididx < n_grid; grididx ++){ buffer_bxnxk[bidx][pixel_idx][grididx] = buffer_bxnxk[bidx][pixel_idx][grididx] / line_variance_parallel_cuda_divide_non_zero(sum_exp); difference = 0.0; grid_f_sum = 0.0; for (int d = 0; d < d_fea; d++){ grid_f = grid_fea_bxkxd[bidx][grididx][d]; pixel_f = img_fea_bxnxd[bidx][pixel_idx][d]; diff = line_variance_parallel_cuda_square(grid_f - pixel_f); difference = difference + diff; grid_f_sum += (dldreconstruct_bxnxd[bidx][pixel_idx][d] * grid_f); } sum_gradient += (buffer_bxnxk[bidx][pixel_idx][grididx] * (dldvariance_bxn[bidx][pixel_idx] * difference + \ grid_f_sum)); } find_sign = 0.0; scalar_t dl_dmindist_element = 0.0; for (int grididx = 0; grididx < n_grid; grididx++){ scalar_t difference = 0.0; scalar_t grid_f_sum = 0.0; for (int d = 0; d < d_fea; d++){ grid_f = grid_fea_bxkxd[bidx][grididx][d]; pixel_f = img_fea_bxnxd[bidx][pixel_idx][d]; diff = line_variance_parallel_cuda_square(grid_f - pixel_f); difference = difference + diff; grid_f_sum += (dldreconstruct_bxnxd[bidx][pixel_idx][d] * grid_f); } dl_dmindist_element = buffer_bxnxk[bidx][pixel_idx][grididx] * (dldvariance_bxn[bidx][pixel_idx] * difference + grid_f_sum) - \ sum_gradient * buffer_bxnxk[bidx][pixel_idx][grididx]; // gradient for the softmax // Get the minimum index distance ax = grid_bxkx3x2[bidx][grididx][0][0] * SCALE; ay = grid_bxkx3x2[bidx][grididx][0][1] * SCALE; bx = grid_bxkx3x2[bidx][grididx][1][0] * SCALE; by = grid_bxkx3x2[bidx][grididx][1][1] * SCALE; cx = grid_bxkx3x2[bidx][grididx][2][0] * SCALE; cy = grid_bxkx3x2[bidx][grididx][2][1] * SCALE; distance(ret, ax, ay, bx, by, cx, cy, x0, y0); condition = ret[0]; min_distance = ret[1]; min_distance_idx = ret[2]; int mem_gradient_idx = bidx * n_grid * 3 * 2 + grididx * 3 * 2; float in_out_sign; if (condition < 0){ continue; } if (condition == 0 || condition == 1){ in_out_sign = 1 - condition * 2; idx_one = min_distance_idx; idx_two = (min_distance_idx + 1 ) % 3; x1 = grid_bxkx3x2[bidx][grididx][idx_one][0]; y1 = grid_bxkx3x2[bidx][grididx][idx_one][1]; x2 = grid_bxkx3x2[bidx][grididx][idx_two][0]; y2 = grid_bxkx3x2[bidx][grididx][idx_two][1]; cal_line_gradient(grad, x1, y1, x2, y2, x0, y0); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + idx_one * 2)), float(dl_dmindist_element * grad[0] / sigma * in_out_sign)); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + idx_one * 2 + 1)), float(dl_dmindist_element * grad[1] / sigma * in_out_sign)); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + idx_two * 2)), float(dl_dmindist_element * grad[2] / sigma * in_out_sign)); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + idx_two * 2 + 1)), float(dl_dmindist_element * grad[3] / sigma * in_out_sign)); } else{ in_out_sign = -1; x1 = grid_bxkx3x2[bidx][grididx][min_distance_idx][0]; y1 = grid_bxkx3x2[bidx][grididx][min_distance_idx][0]; float signx, signy; if (x1 > x0){ signx = 1; } else{ signx = -1; } if (y1 > y0){ signy = 1; } else{ signy = -1; } atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + min_distance_idx * 2)), float(signx * 
dl_dmindist_element / sigma * in_out_sign)); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + min_distance_idx * 2 + 1)), float(signy * dl_dmindist_element / sigma * in_out_sign)); } } } template<typename scalar_t> __global__ void line_variance_parallel_cuda_backword_kernel_batch_calc_buffer( const torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> dldvariance_bxn, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grid_bxkx3x2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_pos_bxnx2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> dldreconstruct_bxnxd, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, scalar_t* __restrict__ dldgrid_bxkx3x2, int bnum, int n_pixel, int n_grid, int d_fea, float sigma) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int grididx = presentthread % n_grid; int pixel_idx = (presentthread - grididx) / n_grid; int bidx = 0; if (bidx >= bnum || pixel_idx >= n_pixel || grididx >= n_grid) return; scalar_t pixel_x = img_pos_bxnx2[bidx][pixel_idx][0]; scalar_t pixel_y = img_pos_bxnx2[bidx][pixel_idx][1]; scalar_t x0 = pixel_x * SCALE; scalar_t y0 = pixel_y * SCALE; scalar_t ax, ay, bx, by, cx, cy; scalar_t ret[3] = {0}; scalar_t condition; ax = grid_bxkx3x2[bidx][grididx][0][0] * SCALE; ay = grid_bxkx3x2[bidx][grididx][0][1] * SCALE; bx = grid_bxkx3x2[bidx][grididx][1][0] * SCALE; by = grid_bxkx3x2[bidx][grididx][1][1] * SCALE; cx = grid_bxkx3x2[bidx][grididx][2][0] * SCALE; cy = grid_bxkx3x2[bidx][grididx][2][1] * SCALE; distance(ret, ax, ay, bx, by, cx, cy, x0, y0); condition = ret[0]; scalar_t min_distance = ret[1]; if (condition < 0) { min_distance = - MAX_DIS; } else if (condition == 0){ min_distance = min_distance / sigma; } else{ min_distance = - min_distance / sigma; } buffer_bxnxk[bidx][pixel_idx][grididx] = min_distance; } #define BLOCK_SIZE 1024 #define WARP_SIZE 32 template <typename scalar_t> __inline__ __device__ scalar_t warpReduceSum(scalar_t val) { for (int offset = WARP_SIZE/2; offset > 0; offset /= 2){ val += __shfl_down(val, offset); } return val; } template <typename scalar_t> __global__ void blockReduceSum( scalar_t* __restrict__ buffer_bxnxk, scalar_t* __restrict__ buffer_bxnx4, scalar_t* __restrict__ max_dist_bxn, int bnum, int n_pixel, int n_grid, int split_size) { static __shared__ scalar_t shared[BLOCK_SIZE / WARP_SIZE]; // Shared mem for 32 partial sums int lane = threadIdx.x % WARP_SIZE; int wid = threadIdx.x / WARP_SIZE; // each thread loads one element from global to local register int presentthread = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; int pixel_idx = (presentthread - tid) / (BLOCK_SIZE * split_size); int block_idx = blockIdx.x; int split = block_idx % split_size; int grididx = split * BLOCK_SIZE + tid; int bidx = 0; if (tid == 0 && pixel_idx < n_pixel) buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = 0; scalar_t val = 0.0; if (bidx < bnum && pixel_idx < n_pixel && grididx < n_grid){ scalar_t max_dist = max_dist_bxn[bidx * n_pixel + pixel_idx]; buffer_bxnxk[bidx * n_pixel * n_grid + pixel_idx * n_grid + grididx] = expf(buffer_bxnxk[bidx * n_pixel * n_grid + pixel_idx * n_grid + grididx] - 
max_dist); val = buffer_bxnxk[bidx * n_pixel * n_grid + pixel_idx * n_grid + grididx]; } val = warpReduceSum(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / WARP_SIZE) ? shared[lane] : 0; if (wid==0){ val = warpReduceSum(val); //Final reduce within first warp if (tid == 0 && pixel_idx < n_pixel){ buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = val; } } } template <typename scalar_t> __global__ void blockReduceSum_sumGradient( const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> dldvariance_bxn, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> dldreconstruct_bxnxd, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, scalar_t* __restrict__ buffer_bxnx4, scalar_t* __restrict__ sum_exp_bxn, int bnum, int n_pixel, int n_grid, int d_fea, int split_size) { static __shared__ scalar_t shared[BLOCK_SIZE / WARP_SIZE]; // Shared mem for 32 partial sums int lane = threadIdx.x % WARP_SIZE; int wid = threadIdx.x / WARP_SIZE; // each thread loads one element from global to local register int presentthread = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; int pixel_idx = (presentthread - tid) / (BLOCK_SIZE * split_size); int block_idx = blockIdx.x; int split = block_idx % split_size; int grididx = split * BLOCK_SIZE + tid; int bidx = 0; if (tid == 0 && pixel_idx < n_pixel) buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = 0; scalar_t val = 0.0; scalar_t pixel_f = 0.0; scalar_t grid_f = 0.0; scalar_t diff = 0.0; scalar_t difference = 0.0; scalar_t grid_f_sum = 0.0; scalar_t sum_exp = sum_exp_bxn[pixel_idx]; if (pixel_idx < n_pixel && grididx < n_grid){ buffer_bxnxk[bidx][pixel_idx][grididx] = buffer_bxnxk[bidx][pixel_idx][grididx] / line_variance_parallel_cuda_divide_non_zero(sum_exp); difference = 0.0; grid_f_sum = 0.0; for (int d = 0; d < d_fea; d++){ grid_f = grid_fea_bxkxd[bidx][grididx][d]; pixel_f = img_fea_bxnxd[bidx][pixel_idx][d]; diff = line_variance_parallel_cuda_square(grid_f - pixel_f); difference = difference + diff; grid_f_sum += (dldreconstruct_bxnxd[bidx][pixel_idx][d] * grid_f); } val = (buffer_bxnxk[bidx][pixel_idx][grididx] * (dldvariance_bxn[bidx][pixel_idx] * difference + grid_f_sum)); } // sum_grad_bxn[bidx * n_pixel + pixel_idx] = sum_gradient; val = warpReduceSum(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / WARP_SIZE) ? 
shared[lane] : 0; if (wid==0){ val = warpReduceSum(val); //Final reduce within first warp if (tid == 0 && pixel_idx < n_pixel){ buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = val; } } } template <typename scalar_t> __inline__ __device__ scalar_t warpReduceMax(scalar_t val) { for (int offset = WARP_SIZE/2; offset > 0; offset /= 2){ val = max(val, __shfl_down(val, offset)); } return val; } template <typename scalar_t> __global__ void blockReduceMax( scalar_t* __restrict__ buffer_bxnxk, scalar_t* __restrict__ buffer_bxnx4, int bnum, int n_pixel, int n_grid, int split_size) { static __shared__ scalar_t shared[BLOCK_SIZE / WARP_SIZE]; // Shared mem for 32 partial sums int lane = threadIdx.x % WARP_SIZE; int wid = threadIdx.x / WARP_SIZE; // each thread loads one element from global to local register int presentthread = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; int pixel_idx = (presentthread - tid) / (BLOCK_SIZE * split_size); int block_idx = blockIdx.x; int split = block_idx % split_size; int grididx = split * BLOCK_SIZE + tid; int bidx = 0; if (tid == 0 && pixel_idx < n_pixel) buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = -MAX_DIS; scalar_t val = -MAX_DIS; if (bidx < bnum && pixel_idx < n_pixel && grididx < n_grid){ val = buffer_bxnxk[bidx * n_pixel * n_grid + pixel_idx * n_grid + grididx]; } val = warpReduceMax(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / WARP_SIZE) ? shared[lane] : -MAX_DIS; if (wid==0){ val = warpReduceMax(val); //Final reduce within first warp if (tid == 0 && pixel_idx < n_pixel){ buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = val; } } } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch_max_reduce_last_step( scalar_t* __restrict__ buffer_bxnx4, scalar_t* __restrict__ buffer_bxn, int bnum, int n_pixel, int n_grid, int split_size) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int pixel_idx = presentthread % n_pixel; int bidx = (presentthread - pixel_idx) / n_pixel; if (bidx >= bnum || pixel_idx >= n_pixel) { return; } int base_idx = bidx * n_pixel * split_size + pixel_idx * split_size; scalar_t max_v = buffer_bxnx4[base_idx + 0]; for (int t=1; t < split_size; t++){ if(buffer_bxnx4[base_idx + t] > max_v){ max_v = buffer_bxnx4[base_idx + t]; } } buffer_bxn[bidx * n_pixel + pixel_idx] = max_v; } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch_sum_reduce_last_step( scalar_t* __restrict__ buffer_bxnx4, scalar_t* __restrict__ buffer_bxn, int bnum, int n_pixel, int n_grid, int split_size) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int pixel_idx = presentthread % n_pixel; int bidx = (presentthread - pixel_idx) / n_pixel; if (bidx >= bnum || pixel_idx >= n_pixel) { return; } int base_idx = bidx * n_pixel * split_size + pixel_idx * split_size; scalar_t sum_v = buffer_bxnx4[base_idx + 0]; for (int t=1; t < split_size; t++){ sum_v += buffer_bxnx4[base_idx + t]; } buffer_bxn[bidx * n_pixel + pixel_idx] = sum_v; } template<typename scalar_t> __global__ void line_variance_parallel_cuda_backword_kernel_batch_gradient( const torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> 
dldvariance_bxn, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grid_bxkx3x2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_pos_bxnx2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> dldreconstruct_bxnxd, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, scalar_t* __restrict__ sum_grad_bxn, scalar_t* __restrict__ dldgrid_bxkx3x2, int bnum, int n_pixel, int n_grid, int d_fea, float sigma) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int grididx = presentthread % n_grid; int pixel_idx = (presentthread - grididx) / n_grid; int bidx = 0; if (bidx >= bnum || pixel_idx >= n_pixel || grididx >= n_grid) return; scalar_t pixel_x = img_pos_bxnx2[bidx][pixel_idx][0]; scalar_t pixel_y = img_pos_bxnx2[bidx][pixel_idx][1]; scalar_t x0 = pixel_x * SCALE; scalar_t y0 = pixel_y * SCALE; scalar_t ax, ay, bx, by, cx, cy; scalar_t x1, y1, x2, y2; // tmp variable for calculating the gradients scalar_t ret[3] = {0}; int min_distance_idx = 0; int idx_one = 0; int idx_two = 0; scalar_t grad[4] = {0}; scalar_t condition; scalar_t sum_gradient = sum_grad_bxn[bidx * n_pixel + pixel_idx]; scalar_t dl_dmindist_element = 0.0; scalar_t difference = 0.0; scalar_t grid_f_sum = 0.0; scalar_t grid_f = 0.0; scalar_t pixel_f = 0.0; scalar_t diff = 0.0; for (int d = 0; d < d_fea; d++){ grid_f = grid_fea_bxkxd[bidx][grididx][d]; pixel_f = img_fea_bxnxd[bidx][pixel_idx][d]; diff = line_variance_parallel_cuda_square(grid_f - pixel_f); difference = difference + diff; grid_f_sum += (dldreconstruct_bxnxd[bidx][pixel_idx][d] * grid_f); } dl_dmindist_element = buffer_bxnxk[bidx][pixel_idx][grididx] * (dldvariance_bxn[bidx][pixel_idx] * difference + grid_f_sum) - \ sum_gradient * buffer_bxnxk[bidx][pixel_idx][grididx]; // gradient for the softmax // Get the minimum index distance ax = grid_bxkx3x2[bidx][grididx][0][0] * SCALE; ay = grid_bxkx3x2[bidx][grididx][0][1] * SCALE; bx = grid_bxkx3x2[bidx][grididx][1][0] * SCALE; by = grid_bxkx3x2[bidx][grididx][1][1] * SCALE; cx = grid_bxkx3x2[bidx][grididx][2][0] * SCALE; cy = grid_bxkx3x2[bidx][grididx][2][1] * SCALE; distance(ret, ax, ay, bx, by, cx, cy, x0, y0); condition = ret[0]; scalar_t min_distance = ret[1]; min_distance_idx = static_cast<int>(ret[2]); int mem_gradient_idx = bidx * n_grid * 3 * 2 + grididx * 3 * 2; float in_out_sign; if (condition < 0){ return; } if (condition == 0 || condition == 1){ in_out_sign = 1 - condition * 2; idx_one = min_distance_idx; idx_two = (min_distance_idx + 1 ) % 3; x1 = grid_bxkx3x2[bidx][grididx][idx_one][0]; y1 = grid_bxkx3x2[bidx][grididx][idx_one][1]; x2 = grid_bxkx3x2[bidx][grididx][idx_two][0]; y2 = grid_bxkx3x2[bidx][grididx][idx_two][1]; cal_line_gradient(grad, x1, y1, x2, y2, x0, y0); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + idx_one * 2)), float(dl_dmindist_element * grad[0] / sigma * in_out_sign)); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + idx_one * 2 + 1)), float(dl_dmindist_element * grad[1] / sigma * in_out_sign)); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + idx_two * 2)), float(dl_dmindist_element * grad[2] / sigma * in_out_sign)); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + idx_two * 2 + 1)), 
float(dl_dmindist_element * grad[3] / sigma * in_out_sign)); } else if (condition == 2){ in_out_sign = -1; x1 = grid_bxkx3x2[bidx][grididx][min_distance_idx][0]; y1 = grid_bxkx3x2[bidx][grididx][min_distance_idx][0]; float signx, signy; if (x1 > x0){ signx = 1; } else{ signx = -1; } if (y1 > y0){ signy = 1; } else{ signy = -1; } atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + min_distance_idx * 2)), float(signx * dl_dmindist_element / sigma * in_out_sign)); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + min_distance_idx * 2 + 1)), float(signy * dl_dmindist_element / sigma * in_out_sign)); } } void line_variance_parallel_cuda_backward_batch(at::Tensor dldvariance_bxn, at::Tensor img_fea_bxnxd, at::Tensor grid_fea_bxkxd, at::Tensor grid_bxkx3x2, at::Tensor img_pos_bxnx2, float sigma, at::Tensor dldreconstruct_bxnxd, at::Tensor buffer_bxnxk, at::Tensor dldgrid_bxkx3x2, at::Tensor buffer_bxn, at::Tensor buffer_bxnx4, int split_size) { int bnum = grid_bxkx3x2.size(0); int n_grid = grid_bxkx3x2.size(1); int n_pixel = img_pos_bxnx2.size(1); int d_fea = img_fea_bxnxd.size(2); const int threadnum = BLOCK_SIZE; const int totalthread_1 = bnum * n_pixel * n_grid; const int blocknum_1 = totalthread_1 / threadnum + 1; const dim3 threads(threadnum, 1, 1); const dim3 blocks_1(blocknum_1, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_backward_batch_calc_buffer", ([&] { hipLaunchKernelGGL(( line_variance_parallel_cuda_backword_kernel_batch_calc_buffer<scalar_t>), dim3(blocks_1), dim3(threads), 0, 0, dldvariance_bxn.packed_accessor<scalar_t, 2, torch::RestrictPtrTraits, size_t>(), img_fea_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_fea_bxkxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_bxkx3x2.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(), img_pos_bxnx2.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), dldreconstruct_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), buffer_bxnxk.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), dldgrid_bxkx3x2.data<scalar_t>(), bnum, n_pixel, n_grid, d_fea, sigma); })); const int totalthread_3 = bnum * n_pixel * BLOCK_SIZE * split_size; const int blocknum_3 = totalthread_3 / threadnum + 1; const dim3 blocks_3(blocknum_3, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { hipLaunchKernelGGL(( blockReduceMax<scalar_t>), dim3(blocks_3), dim3(threads), 0, 0, buffer_bxnxk.data<scalar_t>(), buffer_bxnx4.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); const int totalthread_4 = bnum * n_pixel; const int blocknum_4 = totalthread_4 / threadnum + 1; const dim3 blocks_4(blocknum_4, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { hipLaunchKernelGGL(( line_variance_parallel_cuda_forward_kernel_batch_max_reduce_last_step<scalar_t>), dim3(blocks_4), dim3(threads), 0, 0, buffer_bxnx4.data<scalar_t>(), buffer_bxn.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); const int totalthread_5 = bnum * n_pixel * BLOCK_SIZE * split_size; const int blocknum_5 = totalthread_5 / threadnum + 1; const dim3 blocks_5(blocknum_5, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { hipLaunchKernelGGL(( blockReduceSum<scalar_t>), dim3(blocks_5), dim3(threads), 0, 0, buffer_bxnxk.data<scalar_t>(), 
buffer_bxnx4.data<scalar_t>(), buffer_bxn.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); const int totalthread_6 = bnum * n_pixel; const int blocknum_6 = totalthread_6 / threadnum + 1; const dim3 blocks_6(blocknum_6, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { hipLaunchKernelGGL(( line_variance_parallel_cuda_forward_kernel_batch_sum_reduce_last_step<scalar_t>), dim3(blocks_6), dim3(threads), 0, 0, buffer_bxnx4.data<scalar_t>(), buffer_bxn.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); const int totalthread_7 = bnum * n_pixel * BLOCK_SIZE * split_size; const int blocknum_7 = totalthread_7 / threadnum + 1; const dim3 blocks_7(blocknum_7, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { hipLaunchKernelGGL(( blockReduceSum_sumGradient<scalar_t>), dim3(blocks_7), dim3(threads), 0, 0, img_fea_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_fea_bxkxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), dldvariance_bxn.packed_accessor<scalar_t, 2, torch::RestrictPtrTraits, size_t>(), dldreconstruct_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), buffer_bxnxk.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), buffer_bxnx4.data<scalar_t>(), buffer_bxn.data<scalar_t>(), bnum, n_pixel, n_grid, d_fea, split_size); })); const int totalthread_8 = bnum * n_pixel; const int blocknum_8 = totalthread_8 / threadnum + 1; const dim3 blocks_8(blocknum_8, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { hipLaunchKernelGGL(( line_variance_parallel_cuda_forward_kernel_batch_sum_reduce_last_step<scalar_t>), dim3(blocks_8), dim3(threads), 0, 0, buffer_bxnx4.data<scalar_t>(), buffer_bxn.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); const int threadnum_9 = 512; const int totalthread_9 = bnum * n_pixel * n_grid; const int blocknum_9 = totalthread_9 / threadnum_9 + 1; const dim3 blocks_9(blocknum_9, 1, 1); const dim3 threads_9(threadnum_9, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_backward_batch_gradient", ([&] { hipLaunchKernelGGL(( line_variance_parallel_cuda_backword_kernel_batch_gradient<scalar_t>), dim3(blocks_9), dim3(threads_9), 0, 0, dldvariance_bxn.packed_accessor<scalar_t, 2, torch::RestrictPtrTraits, size_t>(), img_fea_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_fea_bxkxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_bxkx3x2.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(), img_pos_bxnx2.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), dldreconstruct_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), buffer_bxnxk.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), buffer_bxn.data<scalar_t>(), dldgrid_bxkx3x2.data<scalar_t>(), bnum, n_pixel, n_grid, d_fea, sigma); })); return; }
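// The split reductions launched above (blockReduceMax / blockReduceSum /
// blockReduceSum_sumGradient) all follow the standard warp-shuffle pattern:
// each warp folds its 32 lanes with __shfl_down, lane 0 parks the partial in
// shared memory, and the first warp folds those partials. The self-contained
// sketch below isolates that pattern; it is illustrative only, uses the newer
// __shfl_down_sync intrinsic (the kernels above use the legacy __shfl_down),
// and its sizes and names are not taken from this file.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

#define SKETCH_WARP_SIZE 32
#define SKETCH_BLOCK_SIZE 256

__inline__ __device__ float sketchWarpReduceSum(float val) {
    // Fold the upper half of the warp onto the lower half until lane 0
    // holds the warp-wide sum.
    for (int offset = SKETCH_WARP_SIZE / 2; offset > 0; offset /= 2)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;
}

__global__ void sketchBlockSum(const float* in, float* out, int n) {
    __shared__ float shared[SKETCH_BLOCK_SIZE / SKETCH_WARP_SIZE];
    int lane = threadIdx.x % SKETCH_WARP_SIZE;
    int wid  = threadIdx.x / SKETCH_WARP_SIZE;
    int gid  = blockIdx.x * blockDim.x + threadIdx.x;

    float val = (gid < n) ? in[gid] : 0.0f;
    val = sketchWarpReduceSum(val);           // per-warp partial sum
    if (lane == 0) shared[wid] = val;         // one partial per warp
    __syncthreads();

    // Only the first warp folds the per-warp partials together.
    val = (threadIdx.x < blockDim.x / SKETCH_WARP_SIZE) ? shared[lane] : 0.0f;
    if (wid == 0) {
        val = sketchWarpReduceSum(val);
        if (lane == 0) atomicAdd(out, val);   // one atomic per block
    }
}

int main() {
    const int n = 1 << 20;
    std::vector<float> h_in(n, 1.0f);         // all ones, so the sum is n
    float *d_in, *d_out, h_out = 0.0f;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemset(d_out, 0, sizeof(float));
    sketchBlockSum<<<(n + SKETCH_BLOCK_SIZE - 1) / SKETCH_BLOCK_SIZE,
                     SKETCH_BLOCK_SIZE>>>(d_in, d_out, n);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %.0f (expected %d)\n", h_out, n);
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}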
a69d80ac3c572514ecfbe32236f1853d29897508.cu
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <assert.h> #include <THC/THC.h> #include <vector> #include <torch/torch.h> #include <torch/extension.h> #define eps 1e-10 #define SCALE 1.0 #define MAX_DIS 9999999999.0 #include <sys/time.h> static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_abs(scalar_t a){ if (a > 0.0){ return a; } else{ return -a; } } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_sign(scalar_t a){ if (a > 0.0){ return 1; } else if (a == 0.0){ return 0; } else{ return -1; } } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_square(scalar_t a){ return a * a; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_min_dis(scalar_t a, scalar_t b, scalar_t c){ scalar_t min_d = a; if (b < min_d){ min_d = b; } if (c < min_d){ min_d = c; } return min_d; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_min_dis_idx(scalar_t a, scalar_t b, scalar_t c){ scalar_t min_d = a; int min_idx = 0; if (b < min_d){ min_d = b; min_idx = 1; } if (c < min_d){ min_d = c; min_idx = 2; } return min_idx; } template<typename scalar_t> __host__ __device__ scalar_t line_variance_parallel_cuda_divide_non_zero(scalar_t a){ if (a == 0){ return eps; } if (a < 0){ return a - eps; } if (a > 0){ return a + eps; } return eps; } template <typename scalar_t> __host__ __device__ scalar_t distance_line(scalar_t x1, scalar_t y1, scalar_t x2, scalar_t y2, scalar_t x, scalar_t y){ scalar_t dx1x2 = -x1 + x2; scalar_t dy1y2 = -y1 + y2; scalar_t dx1x = x - x1; scalar_t dy1y = y - y1; scalar_t c1 = - x * x1 + x * x2 + x1 * x1 - x1 * x2 - y * y1 + y * y2 + y1 * y1 - y1 * y2; scalar_t c2 = x1 * x1 - 2 * x1 * x2 + x2 * x2 + y1 * y1 - 2 * y1 * y2 + y2 * y2; scalar_t d1 = -dx1x + dx1x2 * c1 / line_variance_parallel_cuda_divide_non_zero(c2); scalar_t d2 = -dy1y + dy1y2 * c1 / line_variance_parallel_cuda_divide_non_zero(c2); scalar_t dis = line_variance_parallel_cuda_abs(d1) + line_variance_parallel_cuda_abs(d2); return dis; } template <typename scalar_t> __host__ __device__ scalar_t distance_point(scalar_t x1, scalar_t y1, scalar_t x, scalar_t y){ return line_variance_parallel_cuda_abs(x - x1) + line_variance_parallel_cuda_abs(y - y1); } template <typename scalar_t> __host__ __device__ void cal_line_gradient(scalar_t* grad, scalar_t x1, scalar_t y1, scalar_t x2, scalar_t y2, scalar_t x, scalar_t y){ scalar_t dx1x2 = -x1 + x2; scalar_t dy1y2 = -y1 + y2; scalar_t dx1x = x - x1; scalar_t dy1y = y - y1; scalar_t c1 = - x * x1 + x * x2 + x1 * x1 - x1 * x2 - y * y1 + y * y2 + y1 * y1 - y1 * y2; scalar_t c2 = x1 * x1 - 2 * x1 * x2 + x2 * x2 + y1 * y1 - 2 * y1 * y2 + y2 * y2; scalar_t c12 = c1 / line_variance_parallel_cuda_divide_non_zero(c2 * c2); scalar_t cx = - dx1x - dx1x2; scalar_t cy = - dy1y - dy1y2; scalar_t d1 = - dx1x + dx1x2 * c1 / line_variance_parallel_cuda_divide_non_zero(c2); scalar_t d2 = - dy1y + dy1y2 * c1 / line_variance_parallel_cuda_divide_non_zero(c2); //scalar_t dis = line_variance_parallel_cuda_abs(d1) + line_variance_parallel_cuda_abs(d2); scalar_t dif_x1 = (2 * dx1x2 * dy1y2 * c12 + dy1y2 * cx / line_variance_parallel_cuda_divide_non_zero(c2)) * 
line_variance_parallel_cuda_sign(d2) + (2 * dx1x2 * dx1x2 * c12 + dx1x2 * cx / line_variance_parallel_cuda_divide_non_zero(c2) + 1 - c1 / line_variance_parallel_cuda_divide_non_zero(c2)) * line_variance_parallel_cuda_sign(d1); scalar_t dif_y1 = (2 * dx1x2 * dy1y2 * c12 + dx1x2 * cy / line_variance_parallel_cuda_divide_non_zero(c2)) * line_variance_parallel_cuda_sign(d1) + (2 * dy1y2 * dy1y2 * c12 + dy1y2 * cy / line_variance_parallel_cuda_divide_non_zero(c2) + 1 - c1 / line_variance_parallel_cuda_divide_non_zero(c2)) * line_variance_parallel_cuda_sign(d2); scalar_t dif_x2 = (dx1x * dy1y2 / line_variance_parallel_cuda_divide_non_zero(c2) - 2 * dx1x2 * dy1y2 * c12) * line_variance_parallel_cuda_sign(d2) + (dx1x * dx1x2 / line_variance_parallel_cuda_divide_non_zero(c2) - 2 * dx1x2 * dx1x2 * c12 + c1 / line_variance_parallel_cuda_divide_non_zero(c2)) * line_variance_parallel_cuda_sign(d1); scalar_t dif_y2 = (dx1x2 * dy1y / line_variance_parallel_cuda_divide_non_zero(c2) - 2 * dx1x2 * dy1y2 * c12) * line_variance_parallel_cuda_sign(d1) + (dy1y * dy1y2 / line_variance_parallel_cuda_divide_non_zero(c2) - 2 * dy1y2 * dy1y2 * c12 + c1 / line_variance_parallel_cuda_divide_non_zero(c2)) * line_variance_parallel_cuda_sign(d2); grad[0] = dif_x1; grad[1] = dif_y1; grad[2] = dif_x2; grad[3] = dif_y2; } template <typename scalar_t> __host__ __device__ void distance(scalar_t* ret, scalar_t x1, scalar_t y1, scalar_t x2, scalar_t y2, scalar_t x3, scalar_t y3, scalar_t x, scalar_t y) { //https://en.wikipedia.org/wiki/Barycentric_coordinate_system scalar_t x1_x2 = x1 - x2; scalar_t y1_y2 = y1 - y2; scalar_t x1_x3 = x1 - x3; scalar_t y1_y3 = y1 - y3; scalar_t x2_x3 = x2 - x3; scalar_t y2_y3 = y2 - y3; scalar_t x_x1 = x - x1; scalar_t y_y1 = y - y1; scalar_t x_x2 = x - x2; scalar_t y_y2 = y - y2; scalar_t x_x3 = x - x3; scalar_t y_y3 = y - y3; scalar_t k1 = y2_y3 * x_x3 - x2_x3 * y_y3; scalar_t k2 = x1_x3 * y_y3 - y1_y3 * x_x3; scalar_t k3 = y2_y3 * x1_x3 - x2_x3 * y1_y3; if(k3 == 0){ // not a legal triangle ret[0] = -2; return; } if(k3 > 0){ // clock-wise triangle ret[0] = -1; return; } scalar_t l1 = k1 / k3; scalar_t l2 = k2 / k3; scalar_t l3 = 1 - l1 - l2; scalar_t dis12 = distance_line(x1, y1, x2, y2, x, y); scalar_t dis23 = distance_line(x2, y2, x3, y3, x, y); scalar_t dis13 = distance_line(x1, y1, x3, y3, x, y); if (l1 >= 0 && l2 >= 0 && l3 >= 0){ // lie inside or on the boundary ret[0] = 0; scalar_t min_dis_line = line_variance_parallel_cuda_min_dis(dis12, dis23, dis13); scalar_t min_dis_line_idx = line_variance_parallel_cuda_min_dis_idx(dis12, dis23, dis13); ret[1] = min_dis_line; ret[2] = min_dis_line_idx; return; } // whether point can calculate distance to certain line bool within12 = ((y1_y2 * y_y1 + x_x1 * x1_x2) * (y1_y2 * y_y2 + x_x2 * x1_x2)) <= 0; bool within23 = ((y2_y3 * y_y3 + x_x3 * x2_x3) * (y2_y3 * y_y2 + x_x2 * x2_x3)) <= 0; bool within13 = ((y1_y3 * y_y1 + x_x1 * x1_x3) * (y1_y3 * y_y3 + x_x3 * x1_x3)) <= 0; dis12 = within12 ? dis12 : MAX_DIS; dis23 = within23 ? dis23 : MAX_DIS; dis13 = within13 ? 
dis13 : MAX_DIS; scalar_t min_dis_line = line_variance_parallel_cuda_min_dis(dis12, dis23, dis13); scalar_t min_dis_line_idx = line_variance_parallel_cuda_min_dis_idx(dis12, dis23, dis13); scalar_t d1 = distance_point(x1, y1, x, y); scalar_t d2 = distance_point(x2, y2, x, y); scalar_t d3 = distance_point(x3, y3, x, y); scalar_t min_dis_point = line_variance_parallel_cuda_min_dis(d1, d2, d3); scalar_t min_dis_point_idx = line_variance_parallel_cuda_min_dis_idx(d1, d2, d3); if (min_dis_line < min_dis_point){ //distance to line ret[0] = 1; ret[1] = min_dis_line; ret[2] = min_dis_line_idx; } else{ //distance to point ret[0] = 2; ret[1] = min_dis_point; ret[2] = min_dis_point_idx; } } template<typename scalar_t> __global__ void line_variance_parallel_cuda_backword_kernel_batch( const torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> dldvariance_bxn, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grid_bxkx3x2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_pos_bxnx2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> dldreconstruct_bxnxd, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, scalar_t* __restrict__ dldgrid_bxkx3x2, int bnum, int n_pixel, int n_grid, int d_fea, float sigma) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int pixel_idx = presentthread % n_pixel; int bidx = (presentthread - pixel_idx) / n_pixel; if (bidx >= bnum || pixel_idx >= n_pixel) return; int total_idx = bidx * n_pixel * n_grid + pixel_idx * n_grid; scalar_t pixel_x = img_pos_bxnx2[bidx][pixel_idx][0]; scalar_t pixel_y = img_pos_bxnx2[bidx][pixel_idx][1]; scalar_t x0 = pixel_x * SCALE; scalar_t y0 = pixel_y * SCALE; scalar_t x1, y1, x2, y2; // tmp variable for calculating the gradients scalar_t min_distance = 0.0; scalar_t sum_exp = 0.0; int min_distance_idx = 0; int idx_one = 0; int idx_two = 0; scalar_t find_sign = 0.0; scalar_t max_dist = -MAX_DIS; scalar_t ax, ay, bx, by, cx, cy; int img_pos_total_idx = bidx * n_pixel * 2 + pixel_idx * 2; scalar_t ret[3] = {0}; scalar_t grad[4] = {0}; scalar_t condition; for (int grididx = 0; grididx < n_grid; grididx++){ ax = grid_bxkx3x2[bidx][grididx][0][0] * SCALE; ay = grid_bxkx3x2[bidx][grididx][0][1] * SCALE; bx = grid_bxkx3x2[bidx][grididx][1][0] * SCALE; by = grid_bxkx3x2[bidx][grididx][1][1] * SCALE; cx = grid_bxkx3x2[bidx][grididx][2][0] * SCALE; cy = grid_bxkx3x2[bidx][grididx][2][1] * SCALE; distance(ret, ax, ay, bx, by, cx, cy, x0, y0); condition = ret[0]; min_distance = ret[1]; if (condition < 0) { min_distance = - MAX_DIS; } else if (condition == 0 && find_sign ==0){ min_distance = min_distance / sigma; find_sign == 1; } else{ min_distance = - min_distance / sigma; } max_dist = max_dist > min_distance ? 
max_dist : min_distance; buffer_bxnxk[bidx][pixel_idx][grididx] = min_distance; } for (int grididx = 0; grididx < n_grid; grididx++){ buffer_bxnxk[bidx][pixel_idx][grididx] = expf(buffer_bxnxk[bidx][pixel_idx][grididx] - max_dist); sum_exp += buffer_bxnxk[bidx][pixel_idx][grididx]; } scalar_t sum_gradient = 0.0; scalar_t pixel_f = 0.0; scalar_t grid_f = 0.0; scalar_t diff = 0.0; scalar_t difference = 0.0; scalar_t grid_f_sum = 0.0; for (int grididx = 0; grididx < n_grid; grididx ++){ buffer_bxnxk[bidx][pixel_idx][grididx] = buffer_bxnxk[bidx][pixel_idx][grididx] / line_variance_parallel_cuda_divide_non_zero(sum_exp); difference = 0.0; grid_f_sum = 0.0; for (int d = 0; d < d_fea; d++){ grid_f = grid_fea_bxkxd[bidx][grididx][d]; pixel_f = img_fea_bxnxd[bidx][pixel_idx][d]; diff = line_variance_parallel_cuda_square(grid_f - pixel_f); difference = difference + diff; grid_f_sum += (dldreconstruct_bxnxd[bidx][pixel_idx][d] * grid_f); } sum_gradient += (buffer_bxnxk[bidx][pixel_idx][grididx] * (dldvariance_bxn[bidx][pixel_idx] * difference + \ grid_f_sum)); } find_sign = 0.0; scalar_t dl_dmindist_element = 0.0; for (int grididx = 0; grididx < n_grid; grididx++){ scalar_t difference = 0.0; scalar_t grid_f_sum = 0.0; for (int d = 0; d < d_fea; d++){ grid_f = grid_fea_bxkxd[bidx][grididx][d]; pixel_f = img_fea_bxnxd[bidx][pixel_idx][d]; diff = line_variance_parallel_cuda_square(grid_f - pixel_f); difference = difference + diff; grid_f_sum += (dldreconstruct_bxnxd[bidx][pixel_idx][d] * grid_f); } dl_dmindist_element = buffer_bxnxk[bidx][pixel_idx][grididx] * (dldvariance_bxn[bidx][pixel_idx] * difference + grid_f_sum) - \ sum_gradient * buffer_bxnxk[bidx][pixel_idx][grididx]; // gradient for the softmax // Get the minimum index distance ax = grid_bxkx3x2[bidx][grididx][0][0] * SCALE; ay = grid_bxkx3x2[bidx][grididx][0][1] * SCALE; bx = grid_bxkx3x2[bidx][grididx][1][0] * SCALE; by = grid_bxkx3x2[bidx][grididx][1][1] * SCALE; cx = grid_bxkx3x2[bidx][grididx][2][0] * SCALE; cy = grid_bxkx3x2[bidx][grididx][2][1] * SCALE; distance(ret, ax, ay, bx, by, cx, cy, x0, y0); condition = ret[0]; min_distance = ret[1]; min_distance_idx = ret[2]; int mem_gradient_idx = bidx * n_grid * 3 * 2 + grididx * 3 * 2; float in_out_sign; if (condition < 0){ continue; } if (condition == 0 || condition == 1){ in_out_sign = 1 - condition * 2; idx_one = min_distance_idx; idx_two = (min_distance_idx + 1 ) % 3; x1 = grid_bxkx3x2[bidx][grididx][idx_one][0]; y1 = grid_bxkx3x2[bidx][grididx][idx_one][1]; x2 = grid_bxkx3x2[bidx][grididx][idx_two][0]; y2 = grid_bxkx3x2[bidx][grididx][idx_two][1]; cal_line_gradient(grad, x1, y1, x2, y2, x0, y0); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + idx_one * 2)), float(dl_dmindist_element * grad[0] / sigma * in_out_sign)); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + idx_one * 2 + 1)), float(dl_dmindist_element * grad[1] / sigma * in_out_sign)); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + idx_two * 2)), float(dl_dmindist_element * grad[2] / sigma * in_out_sign)); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + idx_two * 2 + 1)), float(dl_dmindist_element * grad[3] / sigma * in_out_sign)); } else{ in_out_sign = -1; x1 = grid_bxkx3x2[bidx][grididx][min_distance_idx][0]; y1 = grid_bxkx3x2[bidx][grididx][min_distance_idx][0]; float signx, signy; if (x1 > x0){ signx = 1; } else{ signx = -1; } if (y1 > y0){ signy = 1; } else{ signy = -1; } atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + min_distance_idx * 2)), float(signx * 
dl_dmindist_element / sigma * in_out_sign)); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + min_distance_idx * 2 + 1)), float(signy * dl_dmindist_element / sigma * in_out_sign)); } } } template<typename scalar_t> __global__ void line_variance_parallel_cuda_backword_kernel_batch_calc_buffer( const torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> dldvariance_bxn, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grid_bxkx3x2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_pos_bxnx2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> dldreconstruct_bxnxd, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, scalar_t* __restrict__ dldgrid_bxkx3x2, int bnum, int n_pixel, int n_grid, int d_fea, float sigma) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int grididx = presentthread % n_grid; int pixel_idx = (presentthread - grididx) / n_grid; int bidx = 0; if (bidx >= bnum || pixel_idx >= n_pixel || grididx >= n_grid) return; scalar_t pixel_x = img_pos_bxnx2[bidx][pixel_idx][0]; scalar_t pixel_y = img_pos_bxnx2[bidx][pixel_idx][1]; scalar_t x0 = pixel_x * SCALE; scalar_t y0 = pixel_y * SCALE; scalar_t ax, ay, bx, by, cx, cy; scalar_t ret[3] = {0}; scalar_t condition; ax = grid_bxkx3x2[bidx][grididx][0][0] * SCALE; ay = grid_bxkx3x2[bidx][grididx][0][1] * SCALE; bx = grid_bxkx3x2[bidx][grididx][1][0] * SCALE; by = grid_bxkx3x2[bidx][grididx][1][1] * SCALE; cx = grid_bxkx3x2[bidx][grididx][2][0] * SCALE; cy = grid_bxkx3x2[bidx][grididx][2][1] * SCALE; distance(ret, ax, ay, bx, by, cx, cy, x0, y0); condition = ret[0]; scalar_t min_distance = ret[1]; if (condition < 0) { min_distance = - MAX_DIS; } else if (condition == 0){ min_distance = min_distance / sigma; } else{ min_distance = - min_distance / sigma; } buffer_bxnxk[bidx][pixel_idx][grididx] = min_distance; } #define BLOCK_SIZE 1024 #define WARP_SIZE 32 template <typename scalar_t> __inline__ __device__ scalar_t warpReduceSum(scalar_t val) { for (int offset = WARP_SIZE/2; offset > 0; offset /= 2){ val += __shfl_down(val, offset); } return val; } template <typename scalar_t> __global__ void blockReduceSum( scalar_t* __restrict__ buffer_bxnxk, scalar_t* __restrict__ buffer_bxnx4, scalar_t* __restrict__ max_dist_bxn, int bnum, int n_pixel, int n_grid, int split_size) { static __shared__ scalar_t shared[BLOCK_SIZE / WARP_SIZE]; // Shared mem for 32 partial sums int lane = threadIdx.x % WARP_SIZE; int wid = threadIdx.x / WARP_SIZE; // each thread loads one element from global to local register int presentthread = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; int pixel_idx = (presentthread - tid) / (BLOCK_SIZE * split_size); int block_idx = blockIdx.x; int split = block_idx % split_size; int grididx = split * BLOCK_SIZE + tid; int bidx = 0; if (tid == 0 && pixel_idx < n_pixel) buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = 0; scalar_t val = 0.0; if (bidx < bnum && pixel_idx < n_pixel && grididx < n_grid){ scalar_t max_dist = max_dist_bxn[bidx * n_pixel + pixel_idx]; buffer_bxnxk[bidx * n_pixel * n_grid + pixel_idx * n_grid + grididx] = expf(buffer_bxnxk[bidx * n_pixel * n_grid + pixel_idx * n_grid + grididx] - 
max_dist); val = buffer_bxnxk[bidx * n_pixel * n_grid + pixel_idx * n_grid + grididx]; } val = warpReduceSum(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / WARP_SIZE) ? shared[lane] : 0; if (wid==0){ val = warpReduceSum(val); //Final reduce within first warp if (tid == 0 && pixel_idx < n_pixel){ buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = val; } } } template <typename scalar_t> __global__ void blockReduceSum_sumGradient( const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> dldvariance_bxn, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> dldreconstruct_bxnxd, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, scalar_t* __restrict__ buffer_bxnx4, scalar_t* __restrict__ sum_exp_bxn, int bnum, int n_pixel, int n_grid, int d_fea, int split_size) { static __shared__ scalar_t shared[BLOCK_SIZE / WARP_SIZE]; // Shared mem for 32 partial sums int lane = threadIdx.x % WARP_SIZE; int wid = threadIdx.x / WARP_SIZE; // each thread loads one element from global to local register int presentthread = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; int pixel_idx = (presentthread - tid) / (BLOCK_SIZE * split_size); int block_idx = blockIdx.x; int split = block_idx % split_size; int grididx = split * BLOCK_SIZE + tid; int bidx = 0; if (tid == 0 && pixel_idx < n_pixel) buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = 0; scalar_t val = 0.0; scalar_t pixel_f = 0.0; scalar_t grid_f = 0.0; scalar_t diff = 0.0; scalar_t difference = 0.0; scalar_t grid_f_sum = 0.0; scalar_t sum_exp = sum_exp_bxn[pixel_idx]; if (pixel_idx < n_pixel && grididx < n_grid){ buffer_bxnxk[bidx][pixel_idx][grididx] = buffer_bxnxk[bidx][pixel_idx][grididx] / line_variance_parallel_cuda_divide_non_zero(sum_exp); difference = 0.0; grid_f_sum = 0.0; for (int d = 0; d < d_fea; d++){ grid_f = grid_fea_bxkxd[bidx][grididx][d]; pixel_f = img_fea_bxnxd[bidx][pixel_idx][d]; diff = line_variance_parallel_cuda_square(grid_f - pixel_f); difference = difference + diff; grid_f_sum += (dldreconstruct_bxnxd[bidx][pixel_idx][d] * grid_f); } val = (buffer_bxnxk[bidx][pixel_idx][grididx] * (dldvariance_bxn[bidx][pixel_idx] * difference + grid_f_sum)); } // sum_grad_bxn[bidx * n_pixel + pixel_idx] = sum_gradient; val = warpReduceSum(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / WARP_SIZE) ? 
shared[lane] : 0; if (wid==0){ val = warpReduceSum(val); //Final reduce within first warp if (tid == 0 && pixel_idx < n_pixel){ buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = val; } } } template <typename scalar_t> __inline__ __device__ scalar_t warpReduceMax(scalar_t val) { for (int offset = WARP_SIZE/2; offset > 0; offset /= 2){ val = max(val, __shfl_down(val, offset)); } return val; } template <typename scalar_t> __global__ void blockReduceMax( scalar_t* __restrict__ buffer_bxnxk, scalar_t* __restrict__ buffer_bxnx4, int bnum, int n_pixel, int n_grid, int split_size) { static __shared__ scalar_t shared[BLOCK_SIZE / WARP_SIZE]; // Shared mem for 32 partial sums int lane = threadIdx.x % WARP_SIZE; int wid = threadIdx.x / WARP_SIZE; // each thread loads one element from global to local register int presentthread = blockIdx.x*blockDim.x + threadIdx.x; int tid = threadIdx.x; int pixel_idx = (presentthread - tid) / (BLOCK_SIZE * split_size); int block_idx = blockIdx.x; int split = block_idx % split_size; int grididx = split * BLOCK_SIZE + tid; int bidx = 0; if (tid == 0 && pixel_idx < n_pixel) buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = -MAX_DIS; scalar_t val = -MAX_DIS; if (bidx < bnum && pixel_idx < n_pixel && grididx < n_grid){ val = buffer_bxnxk[bidx * n_pixel * n_grid + pixel_idx * n_grid + grididx]; } val = warpReduceMax(val); // Each warp performs partial reduction if (lane==0) shared[wid]=val; // Write reduced value to shared memory __syncthreads(); // Wait for all partial reductions //read from shared memory only if that warp existed val = (threadIdx.x < blockDim.x / WARP_SIZE) ? shared[lane] : -MAX_DIS; if (wid==0){ val = warpReduceMax(val); //Final reduce within first warp if (tid == 0 && pixel_idx < n_pixel){ buffer_bxnx4[bidx * n_pixel * split_size + pixel_idx * split_size + split] = val; } } } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch_max_reduce_last_step( scalar_t* __restrict__ buffer_bxnx4, scalar_t* __restrict__ buffer_bxn, int bnum, int n_pixel, int n_grid, int split_size) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int pixel_idx = presentthread % n_pixel; int bidx = (presentthread - pixel_idx) / n_pixel; if (bidx >= bnum || pixel_idx >= n_pixel) { return; } int base_idx = bidx * n_pixel * split_size + pixel_idx * split_size; scalar_t max_v = buffer_bxnx4[base_idx + 0]; for (int t=1; t < split_size; t++){ if(buffer_bxnx4[base_idx + t] > max_v){ max_v = buffer_bxnx4[base_idx + t]; } } buffer_bxn[bidx * n_pixel + pixel_idx] = max_v; } template <typename scalar_t> __global__ void line_variance_parallel_cuda_forward_kernel_batch_sum_reduce_last_step( scalar_t* __restrict__ buffer_bxnx4, scalar_t* __restrict__ buffer_bxn, int bnum, int n_pixel, int n_grid, int split_size) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int pixel_idx = presentthread % n_pixel; int bidx = (presentthread - pixel_idx) / n_pixel; if (bidx >= bnum || pixel_idx >= n_pixel) { return; } int base_idx = bidx * n_pixel * split_size + pixel_idx * split_size; scalar_t sum_v = buffer_bxnx4[base_idx + 0]; for (int t=1; t < split_size; t++){ sum_v += buffer_bxnx4[base_idx + t]; } buffer_bxn[bidx * n_pixel + pixel_idx] = sum_v; } template<typename scalar_t> __global__ void line_variance_parallel_cuda_backword_kernel_batch_gradient( const torch::PackedTensorAccessor<scalar_t, 2, torch::RestrictPtrTraits, size_t> 
dldvariance_bxn, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_fea_bxnxd, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> grid_fea_bxkxd, const torch::PackedTensorAccessor<scalar_t, 4, torch::RestrictPtrTraits, size_t> grid_bxkx3x2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> img_pos_bxnx2, const torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> dldreconstruct_bxnxd, torch::PackedTensorAccessor<scalar_t, 3, torch::RestrictPtrTraits, size_t> buffer_bxnxk, scalar_t* __restrict__ sum_grad_bxn, scalar_t* __restrict__ dldgrid_bxkx3x2, int bnum, int n_pixel, int n_grid, int d_fea, float sigma) { // bidx * height + heiidx int presentthread = blockIdx.x * blockDim.x + threadIdx.x; int grididx = presentthread % n_grid; int pixel_idx = (presentthread - grididx) / n_grid; int bidx = 0; if (bidx >= bnum || pixel_idx >= n_pixel || grididx >= n_grid) return; scalar_t pixel_x = img_pos_bxnx2[bidx][pixel_idx][0]; scalar_t pixel_y = img_pos_bxnx2[bidx][pixel_idx][1]; scalar_t x0 = pixel_x * SCALE; scalar_t y0 = pixel_y * SCALE; scalar_t ax, ay, bx, by, cx, cy; scalar_t x1, y1, x2, y2; // tmp variable for calculating the gradients scalar_t ret[3] = {0}; int min_distance_idx = 0; int idx_one = 0; int idx_two = 0; scalar_t grad[4] = {0}; scalar_t condition; scalar_t sum_gradient = sum_grad_bxn[bidx * n_pixel + pixel_idx]; scalar_t dl_dmindist_element = 0.0; scalar_t difference = 0.0; scalar_t grid_f_sum = 0.0; scalar_t grid_f = 0.0; scalar_t pixel_f = 0.0; scalar_t diff = 0.0; for (int d = 0; d < d_fea; d++){ grid_f = grid_fea_bxkxd[bidx][grididx][d]; pixel_f = img_fea_bxnxd[bidx][pixel_idx][d]; diff = line_variance_parallel_cuda_square(grid_f - pixel_f); difference = difference + diff; grid_f_sum += (dldreconstruct_bxnxd[bidx][pixel_idx][d] * grid_f); } dl_dmindist_element = buffer_bxnxk[bidx][pixel_idx][grididx] * (dldvariance_bxn[bidx][pixel_idx] * difference + grid_f_sum) - \ sum_gradient * buffer_bxnxk[bidx][pixel_idx][grididx]; // gradient for the softmax // Get the minimum index distance ax = grid_bxkx3x2[bidx][grididx][0][0] * SCALE; ay = grid_bxkx3x2[bidx][grididx][0][1] * SCALE; bx = grid_bxkx3x2[bidx][grididx][1][0] * SCALE; by = grid_bxkx3x2[bidx][grididx][1][1] * SCALE; cx = grid_bxkx3x2[bidx][grididx][2][0] * SCALE; cy = grid_bxkx3x2[bidx][grididx][2][1] * SCALE; distance(ret, ax, ay, bx, by, cx, cy, x0, y0); condition = ret[0]; scalar_t min_distance = ret[1]; min_distance_idx = static_cast<int>(ret[2]); int mem_gradient_idx = bidx * n_grid * 3 * 2 + grididx * 3 * 2; float in_out_sign; if (condition < 0){ return; } if (condition == 0 || condition == 1){ in_out_sign = 1 - condition * 2; idx_one = min_distance_idx; idx_two = (min_distance_idx + 1 ) % 3; x1 = grid_bxkx3x2[bidx][grididx][idx_one][0]; y1 = grid_bxkx3x2[bidx][grididx][idx_one][1]; x2 = grid_bxkx3x2[bidx][grididx][idx_two][0]; y2 = grid_bxkx3x2[bidx][grididx][idx_two][1]; cal_line_gradient(grad, x1, y1, x2, y2, x0, y0); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + idx_one * 2)), float(dl_dmindist_element * grad[0] / sigma * in_out_sign)); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + idx_one * 2 + 1)), float(dl_dmindist_element * grad[1] / sigma * in_out_sign)); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + idx_two * 2)), float(dl_dmindist_element * grad[2] / sigma * in_out_sign)); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + idx_two * 2 + 1)), 
float(dl_dmindist_element * grad[3] / sigma * in_out_sign)); } else if (condition == 2){ in_out_sign = -1; x1 = grid_bxkx3x2[bidx][grididx][min_distance_idx][0]; y1 = grid_bxkx3x2[bidx][grididx][min_distance_idx][0]; float signx, signy; if (x1 > x0){ signx = 1; } else{ signx = -1; } if (y1 > y0){ signy = 1; } else{ signy = -1; } atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + min_distance_idx * 2)), float(signx * dl_dmindist_element / sigma * in_out_sign)); atomicAdd((float *)(dldgrid_bxkx3x2 + (mem_gradient_idx + min_distance_idx * 2 + 1)), float(signy * dl_dmindist_element / sigma * in_out_sign)); } } void line_variance_parallel_cuda_backward_batch(at::Tensor dldvariance_bxn, at::Tensor img_fea_bxnxd, at::Tensor grid_fea_bxkxd, at::Tensor grid_bxkx3x2, at::Tensor img_pos_bxnx2, float sigma, at::Tensor dldreconstruct_bxnxd, at::Tensor buffer_bxnxk, at::Tensor dldgrid_bxkx3x2, at::Tensor buffer_bxn, at::Tensor buffer_bxnx4, int split_size) { int bnum = grid_bxkx3x2.size(0); int n_grid = grid_bxkx3x2.size(1); int n_pixel = img_pos_bxnx2.size(1); int d_fea = img_fea_bxnxd.size(2); const int threadnum = BLOCK_SIZE; const int totalthread_1 = bnum * n_pixel * n_grid; const int blocknum_1 = totalthread_1 / threadnum + 1; const dim3 threads(threadnum, 1, 1); const dim3 blocks_1(blocknum_1, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_backward_batch_calc_buffer", ([&] { line_variance_parallel_cuda_backword_kernel_batch_calc_buffer<scalar_t><<<blocks_1, threads>>>( dldvariance_bxn.packed_accessor<scalar_t, 2, torch::RestrictPtrTraits, size_t>(), img_fea_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_fea_bxkxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_bxkx3x2.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(), img_pos_bxnx2.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), dldreconstruct_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), buffer_bxnxk.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), dldgrid_bxkx3x2.data<scalar_t>(), bnum, n_pixel, n_grid, d_fea, sigma); })); const int totalthread_3 = bnum * n_pixel * BLOCK_SIZE * split_size; const int blocknum_3 = totalthread_3 / threadnum + 1; const dim3 blocks_3(blocknum_3, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { blockReduceMax<scalar_t><<<blocks_3, threads>>>( buffer_bxnxk.data<scalar_t>(), buffer_bxnx4.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); const int totalthread_4 = bnum * n_pixel; const int blocknum_4 = totalthread_4 / threadnum + 1; const dim3 blocks_4(blocknum_4, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { line_variance_parallel_cuda_forward_kernel_batch_max_reduce_last_step<scalar_t><<<blocks_4, threads>>>( buffer_bxnx4.data<scalar_t>(), buffer_bxn.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); const int totalthread_5 = bnum * n_pixel * BLOCK_SIZE * split_size; const int blocknum_5 = totalthread_5 / threadnum + 1; const dim3 blocks_5(blocknum_5, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { blockReduceSum<scalar_t><<<blocks_5, threads>>>( buffer_bxnxk.data<scalar_t>(), buffer_bxnx4.data<scalar_t>(), buffer_bxn.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); const int totalthread_6 = bnum * n_pixel; const int blocknum_6 = 
totalthread_6 / threadnum + 1; const dim3 blocks_6(blocknum_6, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { line_variance_parallel_cuda_forward_kernel_batch_sum_reduce_last_step<scalar_t><<<blocks_6, threads>>>( buffer_bxnx4.data<scalar_t>(), buffer_bxn.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); const int totalthread_7 = bnum * n_pixel * BLOCK_SIZE * split_size; const int blocknum_7 = totalthread_7 / threadnum + 1; const dim3 blocks_7(blocknum_7, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { blockReduceSum_sumGradient<scalar_t><<<blocks_7, threads>>>( img_fea_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_fea_bxkxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), dldvariance_bxn.packed_accessor<scalar_t, 2, torch::RestrictPtrTraits, size_t>(), dldreconstruct_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), buffer_bxnxk.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), buffer_bxnx4.data<scalar_t>(), buffer_bxn.data<scalar_t>(), bnum, n_pixel, n_grid, d_fea, split_size); })); const int totalthread_8 = bnum * n_pixel; const int blocknum_8 = totalthread_8 / threadnum + 1; const dim3 blocks_8(blocknum_8, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_forward_batch_final", ([&] { line_variance_parallel_cuda_forward_kernel_batch_sum_reduce_last_step<scalar_t><<<blocks_8, threads>>>( buffer_bxnx4.data<scalar_t>(), buffer_bxn.data<scalar_t>(), bnum, n_pixel, n_grid, split_size); })); const int threadnum_9 = 512; const int totalthread_9 = bnum * n_pixel * n_grid; const int blocknum_9 = totalthread_9 / threadnum_9 + 1; const dim3 blocks_9(blocknum_9, 1, 1); const dim3 threads_9(threadnum_9, 1, 1); AT_DISPATCH_FLOATING_TYPES(grid_bxkx3x2.type(), "line_variance_parallel_cuda_backward_batch_gradient", ([&] { line_variance_parallel_cuda_backword_kernel_batch_gradient<scalar_t><<<blocks_9, threads_9>>>( dldvariance_bxn.packed_accessor<scalar_t, 2, torch::RestrictPtrTraits, size_t>(), img_fea_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_fea_bxkxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), grid_bxkx3x2.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(), img_pos_bxnx2.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), dldreconstruct_bxnxd.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), buffer_bxnxk.packed_accessor<scalar_t, 3, torch::RestrictPtrTraits, size_t>(), buffer_bxn.data<scalar_t>(), dldgrid_bxkx3x2.data<scalar_t>(), bnum, n_pixel, n_grid, d_fea, sigma); })); return; }
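// The backward path above is a numerically stable softmax over the signed
// per-triangle distances: a max-reduction first, then exp(x - max), then a
// sum-reduction, then division by the (guarded) sum. The sketch below shows
// that three-pass recipe in isolation, one thread per row; the row/column
// counts and the kernel name are illustrative, not taken from this file.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void stableSoftmaxRows(float* data, int n_rows, int n_cols) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= n_rows) return;
    float* x = data + row * n_cols;

    // Pass 1: row maximum, so the exponent below can never overflow.
    float m = x[0];
    for (int j = 1; j < n_cols; j++) m = fmaxf(m, x[j]);

    // Pass 2: exponentiate the shifted values and accumulate their sum.
    float s = 0.0f;
    for (int j = 0; j < n_cols; j++) { x[j] = expf(x[j] - m); s += x[j]; }

    // Pass 3: normalise, guarding the denominator the same way the file does
    // with line_variance_parallel_cuda_divide_non_zero.
    s = (s == 0.0f) ? 1e-10f : s;
    for (int j = 0; j < n_cols; j++) x[j] /= s;
}

int main() {
    const int n_rows = 2, n_cols = 4;
    float h[n_rows * n_cols] = { 1000.f, 1001.f, 1002.f, 1003.f,
                                   -5.f,    0.f,    5.f,   10.f };
    float* d;
    cudaMalloc(&d, sizeof(h));
    cudaMemcpy(d, h, sizeof(h), cudaMemcpyHostToDevice);
    stableSoftmaxRows<<<1, 32>>>(d, n_rows, n_cols);
    cudaMemcpy(h, d, sizeof(h), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n_rows; i++) {
        for (int j = 0; j < n_cols; j++) printf("%.4f ", h[i * n_cols + j]);
        printf("\n");
    }
    cudaFree(d);
    return 0;
}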
15ba0c54c02075ab296799c2e67b01d17d70f75a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdint.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "tuple.h" extern "C" { __global__ void count( TUPLE *lt, TUPLE *rt, int *count, int *r_p, int *radix, int *l_p, int right, int left ) { int x = blockIdx.x*blockDim.x*gridDim.y + blockDim.x*blockIdx.y + threadIdx.x; //insert partition left table in shared memory __shared__ TUPLE sub_lt[JOIN_SHARED]; for(int i=l_p[blockIdx.x] + threadIdx.x,j=threadIdx.x; i<l_p[blockIdx.x+1]; i += blockDim.x, j += blockDim.x){ if(j<JOIN_SHARED){ sub_lt[j] = lt[i]; } } __syncthreads(); int temp=0; int temp2 = r_p[radix[blockIdx.x]+1]; int temp3 = l_p[blockIdx.x+1] - l_p[blockIdx.x]; int count_x_temp = 0; for(int k=r_p[radix[blockIdx.x]]+threadIdx.x ; k<temp2 ; k += blockDim.x){ temp = rt[k].val; for(int i=0; i<temp3 ;i++){ if(sub_lt[i].val == temp){ count_x_temp++; } } } count[x] = count_x_temp; if(x == gridDim.x*blockDim.x-1){ count[x+1] = 0; } } __global__ void join( TUPLE *lt, TUPLE *rt, RESULT *jt, int *count, int *r_p, int *radix, int *l_p, int right, int left ) { //int x = blockIdx.x*blockDim.x + threadIdx.x; int x = blockIdx.x*blockDim.x*gridDim.y + blockDim.x*blockIdx.y + threadIdx.x; __shared__ TUPLE sub_lt[JOIN_SHARED]; for(int i=l_p[blockIdx.x] + threadIdx.x,j=threadIdx.x; i<l_p[blockIdx.x+1]; i += blockDim.x, j += blockDim.x){ if(j<JOIN_SHARED){ sub_lt[j].key = lt[i].key; sub_lt[j].val = lt[i].val; } } __syncthreads(); TUPLE temp; int temp2 = r_p[radix[blockIdx.x]+1]; int temp3 = l_p[blockIdx.x+1] - l_p[blockIdx.x]; int tcount=count[x]; for(int k=r_p[radix[blockIdx.x]]+threadIdx.x ; k<temp2 ; k += blockDim.x){ temp.key = rt[k].key; temp.val = rt[k].val; for(int i=0; i<temp3 ;i++){ if(sub_lt[i].val == temp.val){ jt[tcount].rkey = temp.key; jt[tcount].rval = temp.val; jt[tcount].lkey = sub_lt[i].key; jt[tcount].lval = sub_lt[i].val; tcount++; } } } } }
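// The count/join pair above is the usual two-pass GPU join: the first kernel
// only counts matches per thread, the host then presumably turns those counts
// into per-thread write offsets (a prefix sum, not shown in this file), and
// the second kernel re-runs the probe and writes each result at its reserved
// slot (tcount = count[x]). The sketch below shows what that host-side scan
// step could look like with Thrust; the toy counts and the use of Thrust are
// illustrative assumptions, not taken from this file.
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/scan.h>

int main() {
    // Toy per-thread match counts, plus the trailing zero slot that the
    // count kernel appends; the real array has one entry per launched thread.
    int h_count[] = {3, 0, 2, 5, 1, 0};
    const int n = sizeof(h_count) / sizeof(int);
    thrust::device_vector<int> d_count(h_count, h_count + n);

    // Exclusive scan: d_count[i] becomes the write offset of thread i in the
    // RESULT array, and the final slot becomes the total number of results,
    // i.e. the size to allocate for jt before launching the join kernel.
    thrust::exclusive_scan(d_count.begin(), d_count.end(), d_count.begin());

    int total = d_count[n - 1];  // 3 + 0 + 2 + 5 + 1 = 11 join results
    for (int i = 0; i < n; i++)
        printf("offset[%d] = %d\n", i, (int)d_count[i]);
    printf("total results = %d\n", total);
    return 0;
}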
15ba0c54c02075ab296799c2e67b01d17d70f75a.cu
#include <stdio.h> #include <stdint.h> #include <cuda.h> #include <sys/time.h> #include "tuple.h" extern "C" { __global__ void count( TUPLE *lt, TUPLE *rt, int *count, int *r_p, int *radix, int *l_p, int right, int left ) { int x = blockIdx.x*blockDim.x*gridDim.y + blockDim.x*blockIdx.y + threadIdx.x; //insert partition left table in shared memory __shared__ TUPLE sub_lt[JOIN_SHARED]; for(int i=l_p[blockIdx.x] + threadIdx.x,j=threadIdx.x; i<l_p[blockIdx.x+1]; i += blockDim.x, j += blockDim.x){ if(j<JOIN_SHARED){ sub_lt[j] = lt[i]; } } __syncthreads(); int temp=0; int temp2 = r_p[radix[blockIdx.x]+1]; int temp3 = l_p[blockIdx.x+1] - l_p[blockIdx.x]; int count_x_temp = 0; for(int k=r_p[radix[blockIdx.x]]+threadIdx.x ; k<temp2 ; k += blockDim.x){ temp = rt[k].val; for(int i=0; i<temp3 ;i++){ if(sub_lt[i].val == temp){ count_x_temp++; } } } count[x] = count_x_temp; if(x == gridDim.x*blockDim.x-1){ count[x+1] = 0; } } __global__ void join( TUPLE *lt, TUPLE *rt, RESULT *jt, int *count, int *r_p, int *radix, int *l_p, int right, int left ) { //int x = blockIdx.x*blockDim.x + threadIdx.x; int x = blockIdx.x*blockDim.x*gridDim.y + blockDim.x*blockIdx.y + threadIdx.x; __shared__ TUPLE sub_lt[JOIN_SHARED]; for(int i=l_p[blockIdx.x] + threadIdx.x,j=threadIdx.x; i<l_p[blockIdx.x+1]; i += blockDim.x, j += blockDim.x){ if(j<JOIN_SHARED){ sub_lt[j].key = lt[i].key; sub_lt[j].val = lt[i].val; } } __syncthreads(); TUPLE temp; int temp2 = r_p[radix[blockIdx.x]+1]; int temp3 = l_p[blockIdx.x+1] - l_p[blockIdx.x]; int tcount=count[x]; for(int k=r_p[radix[blockIdx.x]]+threadIdx.x ; k<temp2 ; k += blockDim.x){ temp.key = rt[k].key; temp.val = rt[k].val; for(int i=0; i<temp3 ;i++){ if(sub_lt[i].val == temp.val){ jt[tcount].rkey = temp.key; jt[tcount].rval = temp.val; jt[tcount].lkey = sub_lt[i].key; jt[tcount].lval = sub_lt[i].val; tcount++; } } } } }
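// Both kernels above stage one partition of the left table into shared
// memory (sub_lt, JOIN_SHARED entries) and then stream the matching
// right-table partition through it, so every probe compares against on-chip
// data. The sketch below isolates that staging idiom with plain ints and an
// aggregate match counter; TILE, the sizes and the all-zero data are made up
// for illustration and are not part of this file.
#include <cstdio>
#include <cuda_runtime.h>

#define TILE 256

__global__ void countMatchesTiled(const int* left, int n_left,
                                  const int* right, int n_right,
                                  unsigned int* n_matches) {
    __shared__ int tile[TILE];

    // Cooperative load of one tile of the left table into shared memory.
    for (int i = threadIdx.x;
         i < TILE && blockIdx.x * TILE + i < n_left; i += blockDim.x)
        tile[i] = left[blockIdx.x * TILE + i];
    __syncthreads();

    int tile_len = min(TILE, n_left - blockIdx.x * TILE);

    // Each thread streams a strided slice of the right table past the tile.
    unsigned int local = 0;
    for (int k = threadIdx.x; k < n_right; k += blockDim.x) {
        int v = right[k];
        for (int i = 0; i < tile_len; i++)
            if (tile[i] == v) local++;
    }
    atomicAdd(n_matches, local);
}

int main() {
    const int n_left = 1024, n_right = 4096;
    int *d_left, *d_right;
    unsigned int *d_matches, h_matches = 0;
    cudaMalloc(&d_left, n_left * sizeof(int));
    cudaMalloc(&d_right, n_right * sizeof(int));
    cudaMalloc(&d_matches, sizeof(unsigned int));
    cudaMemset(d_left, 0, n_left * sizeof(int));    // every left value is 0
    cudaMemset(d_right, 0, n_right * sizeof(int));  // every right value is 0
    cudaMemset(d_matches, 0, sizeof(unsigned int));
    int n_blocks = (n_left + TILE - 1) / TILE;
    countMatchesTiled<<<n_blocks, 128>>>(d_left, n_left, d_right, n_right,
                                         d_matches);
    cudaMemcpy(&h_matches, d_matches, sizeof(unsigned int),
               cudaMemcpyDeviceToHost);
    printf("matches = %u (expected %d)\n", h_matches, n_left * n_right);
    cudaFree(d_left); cudaFree(d_right); cudaFree(d_matches);
    return 0;
}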
76f980340188562d46e40307674a44ab43b1bfcd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** x = 11 k=*10000 **/ #include "vecmultKernel.h" //#include <stdio.h> __global__ void MultiplyVectors(const float* A, const float* B, float* C) { /* if (threadIdx.x == 0) { printf(":D\n"); } */ int B_start_index = blockIdx.x*ValuesPerThread; int A_start_index = threadIdx.x*ValuesPerThread; int C_width = gridDim.x*ValuesPerThread; int t; float c_0_0, c_0_1, c_0_2, c_0_3, c_0_4, c_0_5, c_0_6, c_0_7, c_0_8, c_0_9, c_0_10, c_1_0, c_1_1, c_1_2, c_1_3, c_1_4, c_1_5, c_1_6, c_1_7, c_1_8, c_1_9, c_1_10, c_2_0, c_2_1, c_2_2, c_2_3, c_2_4, c_2_5, c_2_6, c_2_7, c_2_8, c_2_9, c_2_10, c_3_0, c_3_1, c_3_2, c_3_3, c_3_4, c_3_5, c_3_6, c_3_7, c_3_8, c_3_9, c_3_10, c_4_0, c_4_1, c_4_2, c_4_3, c_4_4, c_4_5, c_4_6, c_4_7, c_4_8, c_4_9, c_4_10, c_5_0, c_5_1, c_5_2, c_5_3, c_5_4, c_5_5, c_5_6, c_5_7, c_5_8, c_5_9, c_5_10, c_6_0, c_6_1, c_6_2, c_6_3, c_6_4, c_6_5, c_6_6, c_6_7, c_6_8, c_6_9, c_6_10, c_7_0, c_7_1, c_7_2, c_7_3, c_7_4, c_7_5, c_7_6, c_7_7, c_7_8, c_7_9, c_7_10, c_8_0, c_8_1, c_8_2, c_8_3, c_8_4, c_8_5, c_8_6, c_8_7, c_8_8, c_8_9, c_8_10, c_9_0, c_9_1, c_9_2, c_9_3, c_9_4, c_9_5, c_9_6, c_9_7, c_9_8, c_9_9, c_9_10, c_10_0, c_10_1, c_10_2, c_10_3, c_10_4, c_10_5, c_10_6, c_10_7, c_10_8, c_10_9, c_10_10; float a_0, a_1, a_2, a_3, a_4, a_5, a_6, a_7, a_8, a_9, a_10; float b_0, b_1, b_2, b_3, b_4, b_5, b_6, b_7, b_8, b_9, b_10; a_0 = A[A_start_index+0]; a_1 = A[A_start_index+1]; a_2 = A[A_start_index+2]; a_3 = A[A_start_index+3]; a_4 = A[A_start_index+4]; a_5 = A[A_start_index+5]; a_6 = A[A_start_index+6]; a_7 = A[A_start_index+7]; a_8 = A[A_start_index+8]; a_9 = A[A_start_index+9]; a_10 = A[A_start_index+10]; b_0 = B[B_start_index+0]; b_1 = B[B_start_index+1]; b_2 = B[B_start_index+2]; b_3 = B[B_start_index+3]; b_4 = B[B_start_index+4]; b_5 = B[B_start_index+5]; b_6 = B[B_start_index+6]; b_7 = B[B_start_index+7]; b_8 = B[B_start_index+8]; b_9 = B[B_start_index+9]; b_10 = B[B_start_index+10]; c_0_0 = 0; c_0_1 = 0; c_0_2 = 0; c_0_3 = 0; c_0_4 = 0; c_0_5 = 0; c_0_6 = 0; c_0_7 = 0; c_0_8 = 0; c_0_9 = 0; c_0_10 = 0; c_1_0 = 0; c_1_1 = 0; c_1_2 = 0; c_1_3 = 0; c_1_4 = 0; c_1_5 = 0; c_1_6 = 0; c_1_7 = 0; c_1_8 = 0; c_1_9 = 0; c_1_10 = 0; c_2_0 = 0; c_2_1 = 0; c_2_2 = 0; c_2_3 = 0; c_2_4 = 0; c_2_5 = 0; c_2_6 = 0; c_2_7 = 0; c_2_8 = 0; c_2_9 = 0; c_2_10 = 0; c_3_0 = 0; c_3_1 = 0; c_3_2 = 0; c_3_3 = 0; c_3_4 = 0; c_3_5 = 0; c_3_6 = 0; c_3_7 = 0; c_3_8 = 0; c_3_9 = 0; c_3_10 = 0; c_4_0 = 0; c_4_1 = 0; c_4_2 = 0; c_4_3 = 0; c_4_4 = 0; c_4_5 = 0; c_4_6 = 0; c_4_7 = 0; c_4_8 = 0; c_4_9 = 0; c_4_10 = 0; c_5_0 = 0; c_5_1 = 0; c_5_2 = 0; c_5_3 = 0; c_5_4 = 0; c_5_5 = 0; c_5_6 = 0; c_5_7 = 0; c_5_8 = 0; c_5_9 = 0; c_5_10 = 0; c_6_0 = 0; c_6_1 = 0; c_6_2 = 0; c_6_3 = 0; c_6_4 = 0; c_6_5 = 0; c_6_6 = 0; c_6_7 = 0; c_6_8 = 0; c_6_9 = 0; c_6_10 = 0; c_7_0 = 0; c_7_1 = 0; c_7_2 = 0; c_7_3 = 0; c_7_4 = 0; c_7_5 = 0; c_7_6 = 0; c_7_7 = 0; c_7_8 = 0; c_7_9 = 0; c_7_10 = 0; c_8_0 = 0; c_8_1 = 0; c_8_2 = 0; c_8_3 = 0; c_8_4 = 0; c_8_5 = 0; c_8_6 = 0; c_8_7 = 0; c_8_8 = 0; c_8_9 = 0; c_8_10 = 0; c_9_0 = 0; c_9_1 = 0; c_9_2 = 0; c_9_3 = 0; c_9_4 = 0; c_9_5 = 0; c_9_6 = 0; c_9_7 = 0; c_9_8 = 0; c_9_9 = 0; c_9_10 = 0; c_10_0 = 0; c_10_1 = 0; c_10_2 = 0; c_10_3 = 0; c_10_4 = 0; c_10_5 = 0; c_10_6 = 0; c_10_7 = 0; c_10_8 = 0; c_10_9 = 0; c_10_10 = 0; for (t = 0; t < k; t++) { c_0_0 += a_0*b_0; c_0_1 += a_0*b_1; c_0_2 += a_0*b_2; c_0_3 += a_0*b_3; c_0_4 += a_0*b_4; c_0_5 += a_0*b_5; c_0_6 += a_0*b_6; c_0_7 += a_0*b_7; c_0_8 += a_0*b_8; c_0_9 += 
a_0*b_9; c_0_10 += a_0*b_10; c_1_0 += a_1*b_0; c_1_1 += a_1*b_1; c_1_2 += a_1*b_2; c_1_3 += a_1*b_3; c_1_4 += a_1*b_4; c_1_5 += a_1*b_5; c_1_6 += a_1*b_6; c_1_7 += a_1*b_7; c_1_8 += a_1*b_8; c_1_9 += a_1*b_9; c_1_10 += a_1*b_10; c_2_0 += a_2*b_0; c_2_1 += a_2*b_1; c_2_2 += a_2*b_2; c_2_3 += a_2*b_3; c_2_4 += a_2*b_4; c_2_5 += a_2*b_5; c_2_6 += a_2*b_6; c_2_7 += a_2*b_7; c_2_8 += a_2*b_8; c_2_9 += a_2*b_9; c_2_10 += a_2*b_10; c_3_0 += a_3*b_0; c_3_1 += a_3*b_1; c_3_2 += a_3*b_2; c_3_3 += a_3*b_3; c_3_4 += a_3*b_4; c_3_5 += a_3*b_5; c_3_6 += a_3*b_6; c_3_7 += a_3*b_7; c_3_8 += a_3*b_8; c_3_9 += a_3*b_9; c_3_10 += a_3*b_10; c_4_0 += a_4*b_0; c_4_1 += a_4*b_1; c_4_2 += a_4*b_2; c_4_3 += a_4*b_3; c_4_4 += a_4*b_4; c_4_5 += a_4*b_5; c_4_6 += a_4*b_6; c_4_7 += a_4*b_7; c_4_8 += a_4*b_8; c_4_9 += a_4*b_9; c_4_10 += a_4*b_10; c_5_0 += a_5*b_0; c_5_1 += a_5*b_1; c_5_2 += a_5*b_2; c_5_3 += a_5*b_3; c_5_4 += a_5*b_4; c_5_5 += a_5*b_5; c_5_6 += a_5*b_6; c_5_7 += a_5*b_7; c_5_8 += a_5*b_8; c_5_9 += a_5*b_9; c_5_10 += a_5*b_10; c_6_0 += a_6*b_0; c_6_1 += a_6*b_1; c_6_2 += a_6*b_2; c_6_3 += a_6*b_3; c_6_4 += a_6*b_4; c_6_5 += a_6*b_5; c_6_6 += a_6*b_6; c_6_7 += a_6*b_7; c_6_8 += a_6*b_8; c_6_9 += a_6*b_9; c_6_10 += a_6*b_10; c_7_0 += a_7*b_0; c_7_1 += a_7*b_1; c_7_2 += a_7*b_2; c_7_3 += a_7*b_3; c_7_4 += a_7*b_4; c_7_5 += a_7*b_5; c_7_6 += a_7*b_6; c_7_7 += a_7*b_7; c_7_8 += a_7*b_8; c_7_9 += a_7*b_9; c_7_10 += a_7*b_10; c_8_0 += a_8*b_0; c_8_1 += a_8*b_1; c_8_2 += a_8*b_2; c_8_3 += a_8*b_3; c_8_4 += a_8*b_4; c_8_5 += a_8*b_5; c_8_6 += a_8*b_6; c_8_7 += a_8*b_7; c_8_8 += a_8*b_8; c_8_9 += a_8*b_9; c_8_10 += a_8*b_10; c_9_0 += a_9*b_0; c_9_1 += a_9*b_1; c_9_2 += a_9*b_2; c_9_3 += a_9*b_3; c_9_4 += a_9*b_4; c_9_5 += a_9*b_5; c_9_6 += a_9*b_6; c_9_7 += a_9*b_7; c_9_8 += a_9*b_8; c_9_9 += a_9*b_9; c_9_10 += a_9*b_10; c_10_0 += a_10*b_0; c_10_1 += a_10*b_1; c_10_2 += a_10*b_2; c_10_3 += a_10*b_3; c_10_4 += a_10*b_4; c_10_5 += a_10*b_5; c_10_6 += a_10*b_6; c_10_7 += a_10*b_7; c_10_8 += a_10*b_8; c_10_9 += a_10*b_9; c_10_10 += a_10*b_10; a_0 = a_0*1.1f+1.7f; a_1 = a_1*1.1f+1.7f; a_2 = a_2*1.1f+1.7f; a_3 = a_3*1.1f+1.7f; a_4 = a_4*1.1f+1.7f; a_5 = a_5*1.1f+1.7f; a_6 = a_6*1.1f+1.7f; a_7 = a_7*1.1f+1.7f; a_8 = a_8*1.1f+1.7f; a_9 = a_9*1.1f+1.7f; a_10 = a_10*1.1f+1.7f; b_0 = b_0*1.1f+1.7f; b_1 = b_1*1.1f+1.7f; b_2 = b_2*1.1f+1.7f; b_3 = b_3*1.1f+1.7f; b_4 = b_4*1.1f+1.7f; b_5 = b_5*1.1f+1.7f; b_6 = b_6*1.1f+1.7f; b_7 = b_7*1.1f+1.7f; b_8 = b_8*1.1f+1.7f; b_9 = b_9*1.1f+1.7f; b_10 = b_10*1.1f+1.7f; } C[(A_start_index+0)*C_width + B_start_index+0] = c_0_0; C[(A_start_index+0)*C_width + B_start_index+1] = c_0_1; C[(A_start_index+0)*C_width + B_start_index+2] = c_0_2; C[(A_start_index+0)*C_width + B_start_index+3] = c_0_3; C[(A_start_index+0)*C_width + B_start_index+4] = c_0_4; C[(A_start_index+0)*C_width + B_start_index+5] = c_0_5; C[(A_start_index+0)*C_width + B_start_index+6] = c_0_6; C[(A_start_index+0)*C_width + B_start_index+7] = c_0_7; C[(A_start_index+0)*C_width + B_start_index+8] = c_0_8; C[(A_start_index+0)*C_width + B_start_index+9] = c_0_9; C[(A_start_index+0)*C_width + B_start_index+10] = c_0_10; C[(A_start_index+1)*C_width + B_start_index+0] = c_1_0; C[(A_start_index+1)*C_width + B_start_index+1] = c_1_1; C[(A_start_index+1)*C_width + B_start_index+2] = c_1_2; C[(A_start_index+1)*C_width + B_start_index+3] = c_1_3; C[(A_start_index+1)*C_width + B_start_index+4] = c_1_4; C[(A_start_index+1)*C_width + B_start_index+5] = c_1_5; C[(A_start_index+1)*C_width + B_start_index+6] = c_1_6; 
C[(A_start_index+1)*C_width + B_start_index+7] = c_1_7; C[(A_start_index+1)*C_width + B_start_index+8] = c_1_8; C[(A_start_index+1)*C_width + B_start_index+9] = c_1_9; C[(A_start_index+1)*C_width + B_start_index+10] = c_1_10; C[(A_start_index+2)*C_width + B_start_index+0] = c_2_0; C[(A_start_index+2)*C_width + B_start_index+1] = c_2_1; C[(A_start_index+2)*C_width + B_start_index+2] = c_2_2; C[(A_start_index+2)*C_width + B_start_index+3] = c_2_3; C[(A_start_index+2)*C_width + B_start_index+4] = c_2_4; C[(A_start_index+2)*C_width + B_start_index+5] = c_2_5; C[(A_start_index+2)*C_width + B_start_index+6] = c_2_6; C[(A_start_index+2)*C_width + B_start_index+7] = c_2_7; C[(A_start_index+2)*C_width + B_start_index+8] = c_2_8; C[(A_start_index+2)*C_width + B_start_index+9] = c_2_9; C[(A_start_index+2)*C_width + B_start_index+10] = c_2_10; C[(A_start_index+3)*C_width + B_start_index+0] = c_3_0; C[(A_start_index+3)*C_width + B_start_index+1] = c_3_1; C[(A_start_index+3)*C_width + B_start_index+2] = c_3_2; C[(A_start_index+3)*C_width + B_start_index+3] = c_3_3; C[(A_start_index+3)*C_width + B_start_index+4] = c_3_4; C[(A_start_index+3)*C_width + B_start_index+5] = c_3_5; C[(A_start_index+3)*C_width + B_start_index+6] = c_3_6; C[(A_start_index+3)*C_width + B_start_index+7] = c_3_7; C[(A_start_index+3)*C_width + B_start_index+8] = c_3_8; C[(A_start_index+3)*C_width + B_start_index+9] = c_3_9; C[(A_start_index+3)*C_width + B_start_index+10] = c_3_10; C[(A_start_index+4)*C_width + B_start_index+0] = c_4_0; C[(A_start_index+4)*C_width + B_start_index+1] = c_4_1; C[(A_start_index+4)*C_width + B_start_index+2] = c_4_2; C[(A_start_index+4)*C_width + B_start_index+3] = c_4_3; C[(A_start_index+4)*C_width + B_start_index+4] = c_4_4; C[(A_start_index+4)*C_width + B_start_index+5] = c_4_5; C[(A_start_index+4)*C_width + B_start_index+6] = c_4_6; C[(A_start_index+4)*C_width + B_start_index+7] = c_4_7; C[(A_start_index+4)*C_width + B_start_index+8] = c_4_8; C[(A_start_index+4)*C_width + B_start_index+9] = c_4_9; C[(A_start_index+4)*C_width + B_start_index+10] = c_4_10; C[(A_start_index+5)*C_width + B_start_index+0] = c_5_0; C[(A_start_index+5)*C_width + B_start_index+1] = c_5_1; C[(A_start_index+5)*C_width + B_start_index+2] = c_5_2; C[(A_start_index+5)*C_width + B_start_index+3] = c_5_3; C[(A_start_index+5)*C_width + B_start_index+4] = c_5_4; C[(A_start_index+5)*C_width + B_start_index+5] = c_5_5; C[(A_start_index+5)*C_width + B_start_index+6] = c_5_6; C[(A_start_index+5)*C_width + B_start_index+7] = c_5_7; C[(A_start_index+5)*C_width + B_start_index+8] = c_5_8; C[(A_start_index+5)*C_width + B_start_index+9] = c_5_9; C[(A_start_index+5)*C_width + B_start_index+10] = c_5_10; C[(A_start_index+6)*C_width + B_start_index+0] = c_6_0; C[(A_start_index+6)*C_width + B_start_index+1] = c_6_1; C[(A_start_index+6)*C_width + B_start_index+2] = c_6_2; C[(A_start_index+6)*C_width + B_start_index+3] = c_6_3; C[(A_start_index+6)*C_width + B_start_index+4] = c_6_4; C[(A_start_index+6)*C_width + B_start_index+5] = c_6_5; C[(A_start_index+6)*C_width + B_start_index+6] = c_6_6; C[(A_start_index+6)*C_width + B_start_index+7] = c_6_7; C[(A_start_index+6)*C_width + B_start_index+8] = c_6_8; C[(A_start_index+6)*C_width + B_start_index+9] = c_6_9; C[(A_start_index+6)*C_width + B_start_index+10] = c_6_10; C[(A_start_index+7)*C_width + B_start_index+0] = c_7_0; C[(A_start_index+7)*C_width + B_start_index+1] = c_7_1; C[(A_start_index+7)*C_width + B_start_index+2] = c_7_2; C[(A_start_index+7)*C_width + B_start_index+3] = c_7_3; 
C[(A_start_index+7)*C_width + B_start_index+4] = c_7_4; C[(A_start_index+7)*C_width + B_start_index+5] = c_7_5; C[(A_start_index+7)*C_width + B_start_index+6] = c_7_6; C[(A_start_index+7)*C_width + B_start_index+7] = c_7_7; C[(A_start_index+7)*C_width + B_start_index+8] = c_7_8; C[(A_start_index+7)*C_width + B_start_index+9] = c_7_9; C[(A_start_index+7)*C_width + B_start_index+10] = c_7_10; C[(A_start_index+8)*C_width + B_start_index+0] = c_8_0; C[(A_start_index+8)*C_width + B_start_index+1] = c_8_1; C[(A_start_index+8)*C_width + B_start_index+2] = c_8_2; C[(A_start_index+8)*C_width + B_start_index+3] = c_8_3; C[(A_start_index+8)*C_width + B_start_index+4] = c_8_4; C[(A_start_index+8)*C_width + B_start_index+5] = c_8_5; C[(A_start_index+8)*C_width + B_start_index+6] = c_8_6; C[(A_start_index+8)*C_width + B_start_index+7] = c_8_7; C[(A_start_index+8)*C_width + B_start_index+8] = c_8_8; C[(A_start_index+8)*C_width + B_start_index+9] = c_8_9; C[(A_start_index+8)*C_width + B_start_index+10] = c_8_10; C[(A_start_index+9)*C_width + B_start_index+0] = c_9_0; C[(A_start_index+9)*C_width + B_start_index+1] = c_9_1; C[(A_start_index+9)*C_width + B_start_index+2] = c_9_2; C[(A_start_index+9)*C_width + B_start_index+3] = c_9_3; C[(A_start_index+9)*C_width + B_start_index+4] = c_9_4; C[(A_start_index+9)*C_width + B_start_index+5] = c_9_5; C[(A_start_index+9)*C_width + B_start_index+6] = c_9_6; C[(A_start_index+9)*C_width + B_start_index+7] = c_9_7; C[(A_start_index+9)*C_width + B_start_index+8] = c_9_8; C[(A_start_index+9)*C_width + B_start_index+9] = c_9_9; C[(A_start_index+9)*C_width + B_start_index+10] = c_9_10; C[(A_start_index+10)*C_width + B_start_index+0] = c_10_0; C[(A_start_index+10)*C_width + B_start_index+1] = c_10_1; C[(A_start_index+10)*C_width + B_start_index+2] = c_10_2; C[(A_start_index+10)*C_width + B_start_index+3] = c_10_3; C[(A_start_index+10)*C_width + B_start_index+4] = c_10_4; C[(A_start_index+10)*C_width + B_start_index+5] = c_10_5; C[(A_start_index+10)*C_width + B_start_index+6] = c_10_6; C[(A_start_index+10)*C_width + B_start_index+7] = c_10_7; C[(A_start_index+10)*C_width + B_start_index+8] = c_10_8; C[(A_start_index+10)*C_width + B_start_index+9] = c_10_9; C[(A_start_index+10)*C_width + B_start_index+10] = c_10_10; /* if (threadIdx.x == 0) { printf("%d %d %f\n", blockIdx.x, threadIdx.x, C[0]); } */ }
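// The fully unrolled body above computes, per thread, an 11x11 block of C as a sum of
// outer products of the register values a_* and b_*, re-seeding both with the affine
// update x = x*1.1f + 1.7f after every iteration. A compact (but slower, since the
// arrays may spill to local memory) loop form of the same computation is sketched
// below for reference; it assumes ValuesPerThread (11 here) and k are compile-time
// constants supplied by vecmultKernel.h, which is not shown in this dump.
__global__ void MultiplyVectorsLooped(const float* A, const float* B, float* C)
{
  int B_start_index = blockIdx.x * ValuesPerThread;
  int A_start_index = threadIdx.x * ValuesPerThread;
  int C_width = gridDim.x * ValuesPerThread;
  float a[ValuesPerThread], b[ValuesPerThread];
  float c[ValuesPerThread][ValuesPerThread] = {};
  for (int i = 0; i < ValuesPerThread; i++) {
    a[i] = A[A_start_index + i];
    b[i] = B[B_start_index + i];
  }
  for (int t = 0; t < k; t++) {
    for (int i = 0; i < ValuesPerThread; i++)
      for (int j = 0; j < ValuesPerThread; j++)
        c[i][j] += a[i] * b[j];                     // outer-product accumulation
    for (int i = 0; i < ValuesPerThread; i++) {     // same register update as the unrolled code
      a[i] = a[i] * 1.1f + 1.7f;
      b[i] = b[i] * 1.1f + 1.7f;
    }
  }
  for (int i = 0; i < ValuesPerThread; i++)
    for (int j = 0; j < ValuesPerThread; j++)
      C[(A_start_index + i) * C_width + B_start_index + j] = c[i][j];
}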
76f980340188562d46e40307674a44ab43b1bfcd.cu
/** x = 11 k=*10000 **/ #include "vecmultKernel.h" //#include <stdio.h> __global__ void MultiplyVectors(const float* A, const float* B, float* C) { /* if (threadIdx.x == 0) { printf(":D\n"); } */ int B_start_index = blockIdx.x*ValuesPerThread; int A_start_index = threadIdx.x*ValuesPerThread; int C_width = gridDim.x*ValuesPerThread; int t; float c_0_0, c_0_1, c_0_2, c_0_3, c_0_4, c_0_5, c_0_6, c_0_7, c_0_8, c_0_9, c_0_10, c_1_0, c_1_1, c_1_2, c_1_3, c_1_4, c_1_5, c_1_6, c_1_7, c_1_8, c_1_9, c_1_10, c_2_0, c_2_1, c_2_2, c_2_3, c_2_4, c_2_5, c_2_6, c_2_7, c_2_8, c_2_9, c_2_10, c_3_0, c_3_1, c_3_2, c_3_3, c_3_4, c_3_5, c_3_6, c_3_7, c_3_8, c_3_9, c_3_10, c_4_0, c_4_1, c_4_2, c_4_3, c_4_4, c_4_5, c_4_6, c_4_7, c_4_8, c_4_9, c_4_10, c_5_0, c_5_1, c_5_2, c_5_3, c_5_4, c_5_5, c_5_6, c_5_7, c_5_8, c_5_9, c_5_10, c_6_0, c_6_1, c_6_2, c_6_3, c_6_4, c_6_5, c_6_6, c_6_7, c_6_8, c_6_9, c_6_10, c_7_0, c_7_1, c_7_2, c_7_3, c_7_4, c_7_5, c_7_6, c_7_7, c_7_8, c_7_9, c_7_10, c_8_0, c_8_1, c_8_2, c_8_3, c_8_4, c_8_5, c_8_6, c_8_7, c_8_8, c_8_9, c_8_10, c_9_0, c_9_1, c_9_2, c_9_3, c_9_4, c_9_5, c_9_6, c_9_7, c_9_8, c_9_9, c_9_10, c_10_0, c_10_1, c_10_2, c_10_3, c_10_4, c_10_5, c_10_6, c_10_7, c_10_8, c_10_9, c_10_10; float a_0, a_1, a_2, a_3, a_4, a_5, a_6, a_7, a_8, a_9, a_10; float b_0, b_1, b_2, b_3, b_4, b_5, b_6, b_7, b_8, b_9, b_10; a_0 = A[A_start_index+0]; a_1 = A[A_start_index+1]; a_2 = A[A_start_index+2]; a_3 = A[A_start_index+3]; a_4 = A[A_start_index+4]; a_5 = A[A_start_index+5]; a_6 = A[A_start_index+6]; a_7 = A[A_start_index+7]; a_8 = A[A_start_index+8]; a_9 = A[A_start_index+9]; a_10 = A[A_start_index+10]; b_0 = B[B_start_index+0]; b_1 = B[B_start_index+1]; b_2 = B[B_start_index+2]; b_3 = B[B_start_index+3]; b_4 = B[B_start_index+4]; b_5 = B[B_start_index+5]; b_6 = B[B_start_index+6]; b_7 = B[B_start_index+7]; b_8 = B[B_start_index+8]; b_9 = B[B_start_index+9]; b_10 = B[B_start_index+10]; c_0_0 = 0; c_0_1 = 0; c_0_2 = 0; c_0_3 = 0; c_0_4 = 0; c_0_5 = 0; c_0_6 = 0; c_0_7 = 0; c_0_8 = 0; c_0_9 = 0; c_0_10 = 0; c_1_0 = 0; c_1_1 = 0; c_1_2 = 0; c_1_3 = 0; c_1_4 = 0; c_1_5 = 0; c_1_6 = 0; c_1_7 = 0; c_1_8 = 0; c_1_9 = 0; c_1_10 = 0; c_2_0 = 0; c_2_1 = 0; c_2_2 = 0; c_2_3 = 0; c_2_4 = 0; c_2_5 = 0; c_2_6 = 0; c_2_7 = 0; c_2_8 = 0; c_2_9 = 0; c_2_10 = 0; c_3_0 = 0; c_3_1 = 0; c_3_2 = 0; c_3_3 = 0; c_3_4 = 0; c_3_5 = 0; c_3_6 = 0; c_3_7 = 0; c_3_8 = 0; c_3_9 = 0; c_3_10 = 0; c_4_0 = 0; c_4_1 = 0; c_4_2 = 0; c_4_3 = 0; c_4_4 = 0; c_4_5 = 0; c_4_6 = 0; c_4_7 = 0; c_4_8 = 0; c_4_9 = 0; c_4_10 = 0; c_5_0 = 0; c_5_1 = 0; c_5_2 = 0; c_5_3 = 0; c_5_4 = 0; c_5_5 = 0; c_5_6 = 0; c_5_7 = 0; c_5_8 = 0; c_5_9 = 0; c_5_10 = 0; c_6_0 = 0; c_6_1 = 0; c_6_2 = 0; c_6_3 = 0; c_6_4 = 0; c_6_5 = 0; c_6_6 = 0; c_6_7 = 0; c_6_8 = 0; c_6_9 = 0; c_6_10 = 0; c_7_0 = 0; c_7_1 = 0; c_7_2 = 0; c_7_3 = 0; c_7_4 = 0; c_7_5 = 0; c_7_6 = 0; c_7_7 = 0; c_7_8 = 0; c_7_9 = 0; c_7_10 = 0; c_8_0 = 0; c_8_1 = 0; c_8_2 = 0; c_8_3 = 0; c_8_4 = 0; c_8_5 = 0; c_8_6 = 0; c_8_7 = 0; c_8_8 = 0; c_8_9 = 0; c_8_10 = 0; c_9_0 = 0; c_9_1 = 0; c_9_2 = 0; c_9_3 = 0; c_9_4 = 0; c_9_5 = 0; c_9_6 = 0; c_9_7 = 0; c_9_8 = 0; c_9_9 = 0; c_9_10 = 0; c_10_0 = 0; c_10_1 = 0; c_10_2 = 0; c_10_3 = 0; c_10_4 = 0; c_10_5 = 0; c_10_6 = 0; c_10_7 = 0; c_10_8 = 0; c_10_9 = 0; c_10_10 = 0; for (t = 0; t < k; t++) { c_0_0 += a_0*b_0; c_0_1 += a_0*b_1; c_0_2 += a_0*b_2; c_0_3 += a_0*b_3; c_0_4 += a_0*b_4; c_0_5 += a_0*b_5; c_0_6 += a_0*b_6; c_0_7 += a_0*b_7; c_0_8 += a_0*b_8; c_0_9 += a_0*b_9; c_0_10 += a_0*b_10; c_1_0 += a_1*b_0; c_1_1 += a_1*b_1; c_1_2 += a_1*b_2; c_1_3 
+= a_1*b_3; c_1_4 += a_1*b_4; c_1_5 += a_1*b_5; c_1_6 += a_1*b_6; c_1_7 += a_1*b_7; c_1_8 += a_1*b_8; c_1_9 += a_1*b_9; c_1_10 += a_1*b_10; c_2_0 += a_2*b_0; c_2_1 += a_2*b_1; c_2_2 += a_2*b_2; c_2_3 += a_2*b_3; c_2_4 += a_2*b_4; c_2_5 += a_2*b_5; c_2_6 += a_2*b_6; c_2_7 += a_2*b_7; c_2_8 += a_2*b_8; c_2_9 += a_2*b_9; c_2_10 += a_2*b_10; c_3_0 += a_3*b_0; c_3_1 += a_3*b_1; c_3_2 += a_3*b_2; c_3_3 += a_3*b_3; c_3_4 += a_3*b_4; c_3_5 += a_3*b_5; c_3_6 += a_3*b_6; c_3_7 += a_3*b_7; c_3_8 += a_3*b_8; c_3_9 += a_3*b_9; c_3_10 += a_3*b_10; c_4_0 += a_4*b_0; c_4_1 += a_4*b_1; c_4_2 += a_4*b_2; c_4_3 += a_4*b_3; c_4_4 += a_4*b_4; c_4_5 += a_4*b_5; c_4_6 += a_4*b_6; c_4_7 += a_4*b_7; c_4_8 += a_4*b_8; c_4_9 += a_4*b_9; c_4_10 += a_4*b_10; c_5_0 += a_5*b_0; c_5_1 += a_5*b_1; c_5_2 += a_5*b_2; c_5_3 += a_5*b_3; c_5_4 += a_5*b_4; c_5_5 += a_5*b_5; c_5_6 += a_5*b_6; c_5_7 += a_5*b_7; c_5_8 += a_5*b_8; c_5_9 += a_5*b_9; c_5_10 += a_5*b_10; c_6_0 += a_6*b_0; c_6_1 += a_6*b_1; c_6_2 += a_6*b_2; c_6_3 += a_6*b_3; c_6_4 += a_6*b_4; c_6_5 += a_6*b_5; c_6_6 += a_6*b_6; c_6_7 += a_6*b_7; c_6_8 += a_6*b_8; c_6_9 += a_6*b_9; c_6_10 += a_6*b_10; c_7_0 += a_7*b_0; c_7_1 += a_7*b_1; c_7_2 += a_7*b_2; c_7_3 += a_7*b_3; c_7_4 += a_7*b_4; c_7_5 += a_7*b_5; c_7_6 += a_7*b_6; c_7_7 += a_7*b_7; c_7_8 += a_7*b_8; c_7_9 += a_7*b_9; c_7_10 += a_7*b_10; c_8_0 += a_8*b_0; c_8_1 += a_8*b_1; c_8_2 += a_8*b_2; c_8_3 += a_8*b_3; c_8_4 += a_8*b_4; c_8_5 += a_8*b_5; c_8_6 += a_8*b_6; c_8_7 += a_8*b_7; c_8_8 += a_8*b_8; c_8_9 += a_8*b_9; c_8_10 += a_8*b_10; c_9_0 += a_9*b_0; c_9_1 += a_9*b_1; c_9_2 += a_9*b_2; c_9_3 += a_9*b_3; c_9_4 += a_9*b_4; c_9_5 += a_9*b_5; c_9_6 += a_9*b_6; c_9_7 += a_9*b_7; c_9_8 += a_9*b_8; c_9_9 += a_9*b_9; c_9_10 += a_9*b_10; c_10_0 += a_10*b_0; c_10_1 += a_10*b_1; c_10_2 += a_10*b_2; c_10_3 += a_10*b_3; c_10_4 += a_10*b_4; c_10_5 += a_10*b_5; c_10_6 += a_10*b_6; c_10_7 += a_10*b_7; c_10_8 += a_10*b_8; c_10_9 += a_10*b_9; c_10_10 += a_10*b_10; a_0 = a_0*1.1f+1.7f; a_1 = a_1*1.1f+1.7f; a_2 = a_2*1.1f+1.7f; a_3 = a_3*1.1f+1.7f; a_4 = a_4*1.1f+1.7f; a_5 = a_5*1.1f+1.7f; a_6 = a_6*1.1f+1.7f; a_7 = a_7*1.1f+1.7f; a_8 = a_8*1.1f+1.7f; a_9 = a_9*1.1f+1.7f; a_10 = a_10*1.1f+1.7f; b_0 = b_0*1.1f+1.7f; b_1 = b_1*1.1f+1.7f; b_2 = b_2*1.1f+1.7f; b_3 = b_3*1.1f+1.7f; b_4 = b_4*1.1f+1.7f; b_5 = b_5*1.1f+1.7f; b_6 = b_6*1.1f+1.7f; b_7 = b_7*1.1f+1.7f; b_8 = b_8*1.1f+1.7f; b_9 = b_9*1.1f+1.7f; b_10 = b_10*1.1f+1.7f; } C[(A_start_index+0)*C_width + B_start_index+0] = c_0_0; C[(A_start_index+0)*C_width + B_start_index+1] = c_0_1; C[(A_start_index+0)*C_width + B_start_index+2] = c_0_2; C[(A_start_index+0)*C_width + B_start_index+3] = c_0_3; C[(A_start_index+0)*C_width + B_start_index+4] = c_0_4; C[(A_start_index+0)*C_width + B_start_index+5] = c_0_5; C[(A_start_index+0)*C_width + B_start_index+6] = c_0_6; C[(A_start_index+0)*C_width + B_start_index+7] = c_0_7; C[(A_start_index+0)*C_width + B_start_index+8] = c_0_8; C[(A_start_index+0)*C_width + B_start_index+9] = c_0_9; C[(A_start_index+0)*C_width + B_start_index+10] = c_0_10; C[(A_start_index+1)*C_width + B_start_index+0] = c_1_0; C[(A_start_index+1)*C_width + B_start_index+1] = c_1_1; C[(A_start_index+1)*C_width + B_start_index+2] = c_1_2; C[(A_start_index+1)*C_width + B_start_index+3] = c_1_3; C[(A_start_index+1)*C_width + B_start_index+4] = c_1_4; C[(A_start_index+1)*C_width + B_start_index+5] = c_1_5; C[(A_start_index+1)*C_width + B_start_index+6] = c_1_6; C[(A_start_index+1)*C_width + B_start_index+7] = c_1_7; C[(A_start_index+1)*C_width + B_start_index+8] = c_1_8; 
C[(A_start_index+1)*C_width + B_start_index+9] = c_1_9; C[(A_start_index+1)*C_width + B_start_index+10] = c_1_10; C[(A_start_index+2)*C_width + B_start_index+0] = c_2_0; C[(A_start_index+2)*C_width + B_start_index+1] = c_2_1; C[(A_start_index+2)*C_width + B_start_index+2] = c_2_2; C[(A_start_index+2)*C_width + B_start_index+3] = c_2_3; C[(A_start_index+2)*C_width + B_start_index+4] = c_2_4; C[(A_start_index+2)*C_width + B_start_index+5] = c_2_5; C[(A_start_index+2)*C_width + B_start_index+6] = c_2_6; C[(A_start_index+2)*C_width + B_start_index+7] = c_2_7; C[(A_start_index+2)*C_width + B_start_index+8] = c_2_8; C[(A_start_index+2)*C_width + B_start_index+9] = c_2_9; C[(A_start_index+2)*C_width + B_start_index+10] = c_2_10; C[(A_start_index+3)*C_width + B_start_index+0] = c_3_0; C[(A_start_index+3)*C_width + B_start_index+1] = c_3_1; C[(A_start_index+3)*C_width + B_start_index+2] = c_3_2; C[(A_start_index+3)*C_width + B_start_index+3] = c_3_3; C[(A_start_index+3)*C_width + B_start_index+4] = c_3_4; C[(A_start_index+3)*C_width + B_start_index+5] = c_3_5; C[(A_start_index+3)*C_width + B_start_index+6] = c_3_6; C[(A_start_index+3)*C_width + B_start_index+7] = c_3_7; C[(A_start_index+3)*C_width + B_start_index+8] = c_3_8; C[(A_start_index+3)*C_width + B_start_index+9] = c_3_9; C[(A_start_index+3)*C_width + B_start_index+10] = c_3_10; C[(A_start_index+4)*C_width + B_start_index+0] = c_4_0; C[(A_start_index+4)*C_width + B_start_index+1] = c_4_1; C[(A_start_index+4)*C_width + B_start_index+2] = c_4_2; C[(A_start_index+4)*C_width + B_start_index+3] = c_4_3; C[(A_start_index+4)*C_width + B_start_index+4] = c_4_4; C[(A_start_index+4)*C_width + B_start_index+5] = c_4_5; C[(A_start_index+4)*C_width + B_start_index+6] = c_4_6; C[(A_start_index+4)*C_width + B_start_index+7] = c_4_7; C[(A_start_index+4)*C_width + B_start_index+8] = c_4_8; C[(A_start_index+4)*C_width + B_start_index+9] = c_4_9; C[(A_start_index+4)*C_width + B_start_index+10] = c_4_10; C[(A_start_index+5)*C_width + B_start_index+0] = c_5_0; C[(A_start_index+5)*C_width + B_start_index+1] = c_5_1; C[(A_start_index+5)*C_width + B_start_index+2] = c_5_2; C[(A_start_index+5)*C_width + B_start_index+3] = c_5_3; C[(A_start_index+5)*C_width + B_start_index+4] = c_5_4; C[(A_start_index+5)*C_width + B_start_index+5] = c_5_5; C[(A_start_index+5)*C_width + B_start_index+6] = c_5_6; C[(A_start_index+5)*C_width + B_start_index+7] = c_5_7; C[(A_start_index+5)*C_width + B_start_index+8] = c_5_8; C[(A_start_index+5)*C_width + B_start_index+9] = c_5_9; C[(A_start_index+5)*C_width + B_start_index+10] = c_5_10; C[(A_start_index+6)*C_width + B_start_index+0] = c_6_0; C[(A_start_index+6)*C_width + B_start_index+1] = c_6_1; C[(A_start_index+6)*C_width + B_start_index+2] = c_6_2; C[(A_start_index+6)*C_width + B_start_index+3] = c_6_3; C[(A_start_index+6)*C_width + B_start_index+4] = c_6_4; C[(A_start_index+6)*C_width + B_start_index+5] = c_6_5; C[(A_start_index+6)*C_width + B_start_index+6] = c_6_6; C[(A_start_index+6)*C_width + B_start_index+7] = c_6_7; C[(A_start_index+6)*C_width + B_start_index+8] = c_6_8; C[(A_start_index+6)*C_width + B_start_index+9] = c_6_9; C[(A_start_index+6)*C_width + B_start_index+10] = c_6_10; C[(A_start_index+7)*C_width + B_start_index+0] = c_7_0; C[(A_start_index+7)*C_width + B_start_index+1] = c_7_1; C[(A_start_index+7)*C_width + B_start_index+2] = c_7_2; C[(A_start_index+7)*C_width + B_start_index+3] = c_7_3; C[(A_start_index+7)*C_width + B_start_index+4] = c_7_4; C[(A_start_index+7)*C_width + B_start_index+5] = c_7_5; 
C[(A_start_index+7)*C_width + B_start_index+6] = c_7_6; C[(A_start_index+7)*C_width + B_start_index+7] = c_7_7; C[(A_start_index+7)*C_width + B_start_index+8] = c_7_8; C[(A_start_index+7)*C_width + B_start_index+9] = c_7_9; C[(A_start_index+7)*C_width + B_start_index+10] = c_7_10; C[(A_start_index+8)*C_width + B_start_index+0] = c_8_0; C[(A_start_index+8)*C_width + B_start_index+1] = c_8_1; C[(A_start_index+8)*C_width + B_start_index+2] = c_8_2; C[(A_start_index+8)*C_width + B_start_index+3] = c_8_3; C[(A_start_index+8)*C_width + B_start_index+4] = c_8_4; C[(A_start_index+8)*C_width + B_start_index+5] = c_8_5; C[(A_start_index+8)*C_width + B_start_index+6] = c_8_6; C[(A_start_index+8)*C_width + B_start_index+7] = c_8_7; C[(A_start_index+8)*C_width + B_start_index+8] = c_8_8; C[(A_start_index+8)*C_width + B_start_index+9] = c_8_9; C[(A_start_index+8)*C_width + B_start_index+10] = c_8_10; C[(A_start_index+9)*C_width + B_start_index+0] = c_9_0; C[(A_start_index+9)*C_width + B_start_index+1] = c_9_1; C[(A_start_index+9)*C_width + B_start_index+2] = c_9_2; C[(A_start_index+9)*C_width + B_start_index+3] = c_9_3; C[(A_start_index+9)*C_width + B_start_index+4] = c_9_4; C[(A_start_index+9)*C_width + B_start_index+5] = c_9_5; C[(A_start_index+9)*C_width + B_start_index+6] = c_9_6; C[(A_start_index+9)*C_width + B_start_index+7] = c_9_7; C[(A_start_index+9)*C_width + B_start_index+8] = c_9_8; C[(A_start_index+9)*C_width + B_start_index+9] = c_9_9; C[(A_start_index+9)*C_width + B_start_index+10] = c_9_10; C[(A_start_index+10)*C_width + B_start_index+0] = c_10_0; C[(A_start_index+10)*C_width + B_start_index+1] = c_10_1; C[(A_start_index+10)*C_width + B_start_index+2] = c_10_2; C[(A_start_index+10)*C_width + B_start_index+3] = c_10_3; C[(A_start_index+10)*C_width + B_start_index+4] = c_10_4; C[(A_start_index+10)*C_width + B_start_index+5] = c_10_5; C[(A_start_index+10)*C_width + B_start_index+6] = c_10_6; C[(A_start_index+10)*C_width + B_start_index+7] = c_10_7; C[(A_start_index+10)*C_width + B_start_index+8] = c_10_8; C[(A_start_index+10)*C_width + B_start_index+9] = c_10_9; C[(A_start_index+10)*C_width + B_start_index+10] = c_10_10; /* if (threadIdx.x == 0) { printf("%d %d %f\n", blockIdx.x, threadIdx.x, C[0]); } */ }
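// A minimal host-side launch sketch for the kernel above (the project's real driver is
// not part of this dump). GRID_W and BLOCK_W are illustrative names chosen for this
// sketch; ValuesPerThread comes from vecmultKernel.h. Blocks index chunks of B (columns
// of C) and threads index chunks of A (rows of C).
void launchMultiplyVectors(const float* dA, const float* dB, float* dC)
{
  const int GRID_W  = 16;  // dB must hold GRID_W  * ValuesPerThread floats
  const int BLOCK_W = 16;  // dA must hold BLOCK_W * ValuesPerThread floats
  // dC must hold (BLOCK_W * ValuesPerThread) * (GRID_W * ValuesPerThread) floats
  MultiplyVectors<<<GRID_W, BLOCK_W>>>(dA, dB, dC);
  cudaDeviceSynchronize();
}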
e604e4abb654f67b01aa6ba0762e67ff00a566ab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "gputimer.h" #include <stdio.h> #include <stdlib.h> #include <iostream> using namespace std; const int matrixSize = 1024; __global__ void matrix_add(float *a, float *b, float *c) { int blockId = blockIdx.x + blockIdx.y * gridDim.x; int id = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; //int id = threadIdx.x + threadIdx.y * blockDim.x; c[id] = a[id] + b[id]; } int main() { float *h_a, *h_b, *h_c; float *d_a, *d_b, *d_c; int bytes = matrixSize * matrixSize * sizeof(float); GpuTimer timer; h_a = (float*) malloc(bytes); h_b = (float*) malloc(bytes); h_c = (float*) malloc(bytes); hipMalloc((void **) &d_a, bytes); hipMalloc((void **) &d_b, bytes); hipMalloc((void **) &d_c, bytes); // init host arrays for (int i=0; i<matrixSize*matrixSize; i++) { h_a[i] = i; h_b[i] = i; h_c[i] = 0; } // init gpu arrays hipMemset(d_a, 0, bytes); hipMemset(d_b, 0, bytes); hipMemset(d_c, 0, bytes); // copy to gpu hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice); // kernel call timer.Start(); hipLaunchKernelGGL(( matrix_add), dim3(8192), dim3(128), 0, 0, d_a, d_b, d_c); timer.Stop(); cout << timer.Elapsed() << "\n"; // copy to host hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost); /*for (int i=0; i<matrixSize; i++) { for (int j=0; j<matrixSize; j++) { cout << " " << h_c[i*matrixSize + j]; } cout << endl; }*/ hipFree(d_a); hipFree(d_b); hipFree(d_c); scanf("%d", NULL); return 0; }
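// The index formula in matrix_add is written for a general 2D launch, but the 1D launch
// used in main() (8192 blocks of 128 threads) still covers every one of the 1024*1024
// elements because blockIdx.y, threadIdx.y and blockDim.y collapse to 0 or 1. An
// equivalent 2D configuration is sketched below in plain CUDA launch syntax (hipify
// would rewrite the launch); it is illustrative, not what this file actually does.
void launch_matrix_add_2d(float* d_a, float* d_b, float* d_c)
{
  dim3 grid(matrixSize / 32, matrixSize / 32);  // 32*32 = 1024 blocks
  dim3 block(32, 32);                           // 1024 threads per block
  matrix_add<<<grid, block>>>(d_a, d_b, d_c);   // same total: 1024*1024 threads, one per element
}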
e604e4abb654f67b01aa6ba0762e67ff00a566ab.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "gputimer.h" #include <stdio.h> #include <stdlib.h> #include <iostream> using namespace std; const int matrixSize = 1024; __global__ void matrix_add(float *a, float *b, float *c) { int blockId = blockIdx.x + blockIdx.y * gridDim.x; int id = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; //int id = threadIdx.x + threadIdx.y * blockDim.x; c[id] = a[id] + b[id]; } int main() { float *h_a, *h_b, *h_c; float *d_a, *d_b, *d_c; int bytes = matrixSize * matrixSize * sizeof(float); GpuTimer timer; h_a = (float*) malloc(bytes); h_b = (float*) malloc(bytes); h_c = (float*) malloc(bytes); cudaMalloc((void **) &d_a, bytes); cudaMalloc((void **) &d_b, bytes); cudaMalloc((void **) &d_c, bytes); // init host arrays for (int i=0; i<matrixSize*matrixSize; i++) { h_a[i] = i; h_b[i] = i; h_c[i] = 0; } // init gpu arrays cudaMemset(d_a, 0, bytes); cudaMemset(d_b, 0, bytes); cudaMemset(d_c, 0, bytes); // copy to gpu cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice); // kernel call timer.Start(); matrix_add<<<8192, 128>>>(d_a, d_b, d_c); timer.Stop(); cout << timer.Elapsed() << "\n"; // copy to host cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost); /*for (int i=0; i<matrixSize; i++) { for (int j=0; j<matrixSize; j++) { cout << " " << h_c[i*matrixSize + j]; } cout << endl; }*/ cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); scanf("%d", NULL); return 0; }
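// Because h_a[i] = h_b[i] = i, every output element should come back as 2*i, and all of
// these values are small enough to be represented exactly in a float. A small host-side
// check that could stand in for the commented-out print loop (a sketch, not part of the
// original file):
bool verify_matrix_add(const float* h_c, int n)
{
  for (int i = 0; i < n; i++)
    if (h_c[i] != 2.0f * i) return false;  // exact comparison is safe at these magnitudes
  return true;
}
// usage, after the device-to-host copy: verify_matrix_add(h_c, matrixSize * matrixSize);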
372408e96070842bba41d2e1918604b814ee1f60.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "backwardMaxPoolingKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int batchSize = XSIZE*YSIZE; int *lengths = NULL; hipMalloc(&lengths, XSIZE*YSIZE); int numberEntriesPerInstance = 1; int numberRows = 1; int *maxIndices = NULL; hipMalloc(&maxIndices, XSIZE*YSIZE); float *chain = NULL; hipMalloc(&chain, XSIZE*YSIZE); float *result = NULL; hipMalloc(&result, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( backwardMaxPoolingKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, batchSize,lengths,numberEntriesPerInstance,numberRows,maxIndices,chain,result); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( backwardMaxPoolingKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, batchSize,lengths,numberEntriesPerInstance,numberRows,maxIndices,chain,result); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( backwardMaxPoolingKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, batchSize,lengths,numberEntriesPerInstance,numberRows,maxIndices,chain,result); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
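// The two while-loops only round XSIZE/YSIZE up to the next multiple of the block
// dimensions, so the grid computation is equivalent to a ceiling division. The same
// arithmetic as a small helper (a sketch; the harness above keeps the loop form):
dim3 make_grid(int xsize, int ysize, int blockx, int blocky)
{
  int gx = (xsize + blockx - 1) / blockx;  // e.g. XSIZE = 240, BLOCKX = 32 -> 8 blocks
  int gy = (ysize + blocky - 1) / blocky;
  return dim3(gx, gy);                     // matches gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY)
}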
372408e96070842bba41d2e1918604b814ee1f60.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "backwardMaxPoolingKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int batchSize = XSIZE*YSIZE; int *lengths = NULL; cudaMalloc(&lengths, XSIZE*YSIZE); int numberEntriesPerInstance = 1; int numberRows = 1; int *maxIndices = NULL; cudaMalloc(&maxIndices, XSIZE*YSIZE); float *chain = NULL; cudaMalloc(&chain, XSIZE*YSIZE); float *result = NULL; cudaMalloc(&result, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); backwardMaxPoolingKernel<<<gridBlock,threadBlock>>>(batchSize,lengths,numberEntriesPerInstance,numberRows,maxIndices,chain,result); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { backwardMaxPoolingKernel<<<gridBlock,threadBlock>>>(batchSize,lengths,numberEntriesPerInstance,numberRows,maxIndices,chain,result); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { backwardMaxPoolingKernel<<<gridBlock,threadBlock>>>(batchSize,lengths,numberEntriesPerInstance,numberRows,maxIndices,chain,result); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
561737e6490e793801d4abd3496df4398a629667.hip
// !!! This is a file automatically generated by hipify!!! #include "Direct.cuh" #include "implementKernel.cuh" DirectGPU::DirectGPU(int n) { InitDevice(n); } void DirectGPU::Solve(Particle* particles, float timeStep, int n) { //sum accelerations EulerAcceleration << <numberOfBlocks, threadsPerBlock >> > (n, particles, timeStep); //sync since accelerations must be completed before integrating positions hipDeviceSynchronize(); //Integrate position using new velocity from acceleration EulerPosition << <numberOfBlocks, threadsPerBlock >> > (n, particles, timeStep); //sync before continuing update hipDeviceSynchronize(); } void DirectGPU::InitDevice(int n) { threadsPerBlock = 256; numberOfBlocks = (n + threadsPerBlock - 1) / threadsPerBlock; //find appropriate number of blocks }
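// A minimal usage sketch for DirectGPU (assumptions: Particle comes from Direct.cuh,
// which is not in this dump, and the particle buffer already lives in device memory,
// since Solve() passes the pointer straight to the kernels):
void run_simulation(Particle* d_particles, int n, float dt, int steps)
{
  DirectGPU solver(n);                  // InitDevice picks 256 threads/block and a matching grid
  for (int s = 0; s < steps; s++)
    solver.Solve(d_particles, dt, n);   // one acceleration pass + one position integration per step
}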
561737e6490e793801d4abd3496df4398a629667.cu
#include "Direct.cuh" #include "implementKernel.cuh" DirectGPU::DirectGPU(int n) { InitDevice(n); } void DirectGPU::Solve(Particle* particles, float timeStep, int n) { //sum accelerations EulerAcceleration << <numberOfBlocks, threadsPerBlock >> > (n, particles, timeStep); //sync since accelerations must be completed before integrating positions cudaDeviceSynchronize(); //Integrate position using new velocity from acceleration EulerPosition << <numberOfBlocks, threadsPerBlock >> > (n, particles, timeStep); //sync before continuing update cudaDeviceSynchronize(); } void DirectGPU::InitDevice(int n) { threadsPerBlock = 256; numberOfBlocks = (n + threadsPerBlock - 1) / threadsPerBlock; //find appropriate number of blocks }
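// implementKernel.cuh is not part of this dump, so EulerAcceleration and EulerPosition
// are only visible here by name. Purely as an illustration of the expected shape -- not
// the project's actual code -- an integration kernel might look like the following,
// assuming (hypothetically) that Particle exposes float3 members pos, vel and acc:
__global__ void EulerPositionSketch(int n, Particle* p, float dt)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;  // guard the padded tail of the grid
  p[i].vel.x += p[i].acc.x * dt; p[i].vel.y += p[i].acc.y * dt; p[i].vel.z += p[i].acc.z * dt;
  p[i].pos.x += p[i].vel.x * dt; p[i].pos.y += p[i].vel.y * dt; p[i].pos.z += p[i].vel.z * dt;
}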
49497c8a4e7e4afd193faad6f26d0af70692de75.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////// // // EXAMPLE OF INCLUSIVE PREFIX-SCAN CHAPTER 8 // Brent_Kung_scan // //////////////////////////////////////////////////////////////////////////// #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> //The maximum number of threads is of section elements #define SECTION_SIZE 2048 hipError_t Brent_Kung_scan(float *X, float *Y, unsigned int size, float *msTime); void sequential_scan(float *x, float *y, int Max_i); void print_Array(float *A, int size); int verify_result(float *Y, float *YS, int size); //////////////////////////////////////////////////////////////////////////////// //! Simple bad prefix sum //! @param X input data in global memory //! @param Y output data in global memory //! @param InputSize size of input and output data //////////////////////////////////////////////////////////////////////////////// __global__ void Brent_Kung_scan_kernel(float *X, float *Y, int InputSize) { __shared__ float XY[SECTION_SIZE]; int i = 2 * blockIdx.x*blockDim.x + threadIdx.x; if (i < InputSize) XY[threadIdx.x] = X[i]; if (i + blockDim.x < InputSize) XY[threadIdx.x + blockDim.x] = X[i + blockDim.x]; for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2) { __syncthreads(); int index = (threadIdx.x + 1) * 2 * stride - 1; if (index < SECTION_SIZE) { XY[index] += XY[index - stride]; } } for (int stride = SECTION_SIZE / 4; stride > 0; stride /= 2) { __syncthreads(); int index = (threadIdx.x + 1)*stride * 2 - 1; if (index + stride < SECTION_SIZE) { XY[index + stride] += XY[index]; } } __syncthreads(); if (i < InputSize) Y[i] = XY[threadIdx.x]; if (i + blockDim.x < InputSize) Y[i + blockDim.x] = XY[threadIdx.x + blockDim.x]; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main() { const int arraySize = 2048; float *Y, *YS, *X; //float X[arraySize] = { 2,1,3,1,0,4,1,2,0,3,1,2,5,3,1,2 }; float msTime, msTime_seq; hipEvent_t startTimeCuda, stopTimeCuda; hipEventCreate(&startTimeCuda); hipEventCreate(&stopTimeCuda); X = (float*)malloc(arraySize * sizeof(float)); Y = (float*)malloc(arraySize * sizeof(float)); YS = (float*)malloc(arraySize * sizeof(float)); //fill input vector for (int i = 0; i < arraySize; i++) { X[i] = (float)(i + 1.0); } //printf("Array input: "); //print_Array(X, arraySize); // ------------------ Perform sequential scan. ----------------------------- printf("Sequential scan...\n"); hipEventRecord(startTimeCuda, 0); hipEventSynchronize(startTimeCuda); sequential_scan(X, YS, arraySize); hipEventRecord(stopTimeCuda, 0); hipEventSynchronize(stopTimeCuda); hipEventElapsedTime(&msTime_seq, startTimeCuda, stopTimeCuda); printf("HostTime: %f\n\n", msTime_seq); //printf(" OK!\n"); //print_Array(YS, arraySize); // ------------------ perform parallel scan. ------------------------------- printf("Parallel scan...\n"); hipError_t cudaStatus = Brent_Kung_scan(X, Y, arraySize, &msTime); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); goto Error; } //printf(" OK!\n"); //print_Array(Y, arraySize); // ------------------ verify the result. 
----------------------------------- if (verify_result(Y, YS, arraySize)) { goto Error; } printf("TEST PASSED!\n"); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); goto Error; } printf("Speedup: %f\n", msTime_seq / msTime); free(X); free(Y); free(YS); #ifdef WIN32 system("pause"); #endif // WIN32 return 0; Error: free(X); free(Y); free(YS); #ifdef WIN32 system("pause"); #endif // WIN32 return 1; } // Helper function for using CUDA to perform scan in parallel. hipError_t Brent_Kung_scan(float *X, float *Y, unsigned int size, float *msTime) { float *dev_X, *dev_Y; hipError_t cudaStatus; hipEvent_t startTimeCuda, stopTimeCuda; hipEventCreate(&startTimeCuda); hipEventCreate(&stopTimeCuda); // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for two vectors. cudaStatus = hipMalloc((void**)&dev_X, size * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_Y, size * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vector from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_X, X, size * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipEventRecord(startTimeCuda, 0); hipEventSynchronize(startTimeCuda); Brent_Kung_scan_kernel << < 1, SECTION_SIZE/2 >> >(dev_X, dev_Y, size); hipEventRecord(stopTimeCuda, 0); hipEventSynchronize(stopTimeCuda); hipEventElapsedTime(msTime, startTimeCuda, stopTimeCuda); printf("KernelTime: %f\n\n", *msTime); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(Y, dev_Y, size * sizeof(float), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_X); hipFree(dev_Y); return cudaStatus; } void sequential_scan(float *x, float *y, int Max_i) { float accumulator = x[0]; y[0] = accumulator; for (int i = 1; i < Max_i; i++) { accumulator += x[i]; y[i] = accumulator; } } void print_Array(float *A, int size) { for (int i = 0; i < size; i++) { printf("%.2f ", A[i]); } printf("\n\n"); } int verify_result(float *Y, float *YS, int size) { for (int i = 0; i < size; i++) { if (Y[i] != YS[i]) { printf("Error Y[%d] = %.2f != %.2f = YS[%d]\n", i, Y[i], YS[i], i); return 1; } } return 0; }
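// The kernel follows the classic Brent-Kung structure: a reduction (up-sweep) phase
// followed by a distribution (down-sweep) phase, both over the shared array XY. Trace
// for a hypothetical SECTION_SIZE of 8 (blockDim.x = 4) with inputs x0..x7 in XY[0..7]:
//   up-sweep,   stride 1:  XY[1]=x0+x1   XY[3]=x2+x3   XY[5]=x4+x5   XY[7]=x6+x7
//   up-sweep,   stride 2:  XY[3]=x0+...+x3              XY[7]=x4+...+x7
//   up-sweep,   stride 4:  XY[7]=x0+...+x7
//   down-sweep, stride 2:  XY[5]=x0+...+x5
//   down-sweep, stride 1:  XY[2]=x0+...+x2   XY[4]=x0+...+x4   XY[6]=x0+...+x6
// leaving XY[i] = x0+...+xi, i.e. the same inclusive prefix sum that sequential_scan
// produces on the host and that verify_result compares against.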
49497c8a4e7e4afd193faad6f26d0af70692de75.cu
//////////////////////////////////////////////////////////////////////////// // // EXAMPLE OF INCLUSIVE PREFIX-SCAN CHAPTER 8 // Brent_Kung_scan // //////////////////////////////////////////////////////////////////////////// #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <math.h> //The maximum number of threads is of section elements #define SECTION_SIZE 2048 cudaError_t Brent_Kung_scan(float *X, float *Y, unsigned int size, float *msTime); void sequential_scan(float *x, float *y, int Max_i); void print_Array(float *A, int size); int verify_result(float *Y, float *YS, int size); //////////////////////////////////////////////////////////////////////////////// //! Simple bad prefix sum //! @param X input data in global memory //! @param Y output data in global memory //! @param InputSize size of input and output data //////////////////////////////////////////////////////////////////////////////// __global__ void Brent_Kung_scan_kernel(float *X, float *Y, int InputSize) { __shared__ float XY[SECTION_SIZE]; int i = 2 * blockIdx.x*blockDim.x + threadIdx.x; if (i < InputSize) XY[threadIdx.x] = X[i]; if (i + blockDim.x < InputSize) XY[threadIdx.x + blockDim.x] = X[i + blockDim.x]; for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2) { __syncthreads(); int index = (threadIdx.x + 1) * 2 * stride - 1; if (index < SECTION_SIZE) { XY[index] += XY[index - stride]; } } for (int stride = SECTION_SIZE / 4; stride > 0; stride /= 2) { __syncthreads(); int index = (threadIdx.x + 1)*stride * 2 - 1; if (index + stride < SECTION_SIZE) { XY[index + stride] += XY[index]; } } __syncthreads(); if (i < InputSize) Y[i] = XY[threadIdx.x]; if (i + blockDim.x < InputSize) Y[i + blockDim.x] = XY[threadIdx.x + blockDim.x]; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main() { const int arraySize = 2048; float *Y, *YS, *X; //float X[arraySize] = { 2,1,3,1,0,4,1,2,0,3,1,2,5,3,1,2 }; float msTime, msTime_seq; cudaEvent_t startTimeCuda, stopTimeCuda; cudaEventCreate(&startTimeCuda); cudaEventCreate(&stopTimeCuda); X = (float*)malloc(arraySize * sizeof(float)); Y = (float*)malloc(arraySize * sizeof(float)); YS = (float*)malloc(arraySize * sizeof(float)); //fill input vector for (int i = 0; i < arraySize; i++) { X[i] = (float)(i + 1.0); } //printf("Array input: "); //print_Array(X, arraySize); // ------------------ Perform sequential scan. ----------------------------- printf("Sequential scan...\n"); cudaEventRecord(startTimeCuda, 0); cudaEventSynchronize(startTimeCuda); sequential_scan(X, YS, arraySize); cudaEventRecord(stopTimeCuda, 0); cudaEventSynchronize(stopTimeCuda); cudaEventElapsedTime(&msTime_seq, startTimeCuda, stopTimeCuda); printf("HostTime: %f\n\n", msTime_seq); //printf(" OK!\n"); //print_Array(YS, arraySize); // ------------------ perform parallel scan. ------------------------------- printf("Parallel scan...\n"); cudaError_t cudaStatus = Brent_Kung_scan(X, Y, arraySize, &msTime); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); goto Error; } //printf(" OK!\n"); //print_Array(Y, arraySize); // ------------------ verify the result. 
----------------------------------- if (verify_result(Y, YS, arraySize)) { goto Error; } printf("TEST PASSED!\n"); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); goto Error; } printf("Speedup: %f\n", msTime_seq / msTime); free(X); free(Y); free(YS); #ifdef WIN32 system("pause"); #endif // WIN32 return 0; Error: free(X); free(Y); free(YS); #ifdef WIN32 system("pause"); #endif // WIN32 return 1; } // Helper function for using CUDA to perform scan in parallel. cudaError_t Brent_Kung_scan(float *X, float *Y, unsigned int size, float *msTime) { float *dev_X, *dev_Y; cudaError_t cudaStatus; cudaEvent_t startTimeCuda, stopTimeCuda; cudaEventCreate(&startTimeCuda); cudaEventCreate(&stopTimeCuda); // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for two vectors. cudaStatus = cudaMalloc((void**)&dev_X, size * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_Y, size * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vector from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_X, X, size * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. cudaEventRecord(startTimeCuda, 0); cudaEventSynchronize(startTimeCuda); Brent_Kung_scan_kernel << < 1, SECTION_SIZE/2 >> >(dev_X, dev_Y, size); cudaEventRecord(stopTimeCuda, 0); cudaEventSynchronize(stopTimeCuda); cudaEventElapsedTime(msTime, startTimeCuda, stopTimeCuda); printf("KernelTime: %f\n\n", *msTime); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(Y, dev_Y, size * sizeof(float), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_X); cudaFree(dev_Y); return cudaStatus; } void sequential_scan(float *x, float *y, int Max_i) { float accumulator = x[0]; y[0] = accumulator; for (int i = 1; i < Max_i; i++) { accumulator += x[i]; y[i] = accumulator; } } void print_Array(float *A, int size) { for (int i = 0; i < size; i++) { printf("%.2f ", A[i]); } printf("\n\n"); } int verify_result(float *Y, float *YS, int size) { for (int i = 0; i < size; i++) { if (Y[i] != YS[i]) { printf("Error Y[%d] = %.2f != %.2f = YS[%d]\n", i, Y[i], YS[i], i); return 1; } } return 0; }
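// Each of the SECTION_SIZE/2 = 1024 threads loads and stores two elements (index i and
// i + blockDim.x), so a single block covers exactly SECTION_SIZE = 2048 inputs -- which
// matches arraySize here. Larger inputs would need a hierarchical scan (per-section
// scans, a scan of the section sums, then a fix-up add), which this file does not
// implement. A guard that makes the single-block assumption explicit (a sketch, not
// part of the original):
cudaError_t checked_scan_launch(float* dev_X, float* dev_Y, unsigned int size)
{
  if (size > SECTION_SIZE) return cudaErrorInvalidValue;  // elements past SECTION_SIZE would be left untouched
  Brent_Kung_scan_kernel<<<1, SECTION_SIZE / 2>>>(dev_X, dev_Y, size);
  return cudaGetLastError();
}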
4b63766811856b0759396c30f79b2ffc90602d01.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ImageCleaner.h" #ifndef SIZEX #error Please define SIZEX. #endif #ifndef SIZEY #error Please define SIZEY. #endif #define PI 3.14159256 //---------------------------------------------------------------- // TODO: CREATE NEW KERNELS HERE. YOU CAN PLACE YOUR CALLS TO // THEM IN THE INDICATED SECTION INSIDE THE 'filterImage' // FUNCTION. // // BEGIN ADD KERNEL DEFINITIONS //---------------------------------------------------------------- __global__ void exampleKernel(float *real_image, float *imag_image, int size_x, int size_y) { // Currently does nothing } __global__ void pre_compute(float *cos_term, float *sin_term) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; float fft_real = 0; float fft_imag = 0; int tx = threadIdx.x; __syncthreads(); for(unsigned int n = 0; n < SIZEY; n++) { float term = -2 * PI * tx * n / SIZEY; fft_real = cos(term); fft_imag = sin(term); cos_term[n*SIZEY + tx] = fft_real; sin_term[n*SIZEY + tx] = fft_imag; } __syncthreads(); } __global__ void pre_compute_i(float *cos_term, float *sin_term) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; float fft_real = 0; float fft_imag = 0; int tx = threadIdx.x; __syncthreads(); for(unsigned int n = 0; n < SIZEY; n++) { float term = 2 * PI * tx * n / SIZEY; fft_real = cos(term); fft_imag = sin(term); cos_term[n*SIZEY + tx] = fft_real; sin_term[n*SIZEY + tx] = fft_imag; } __syncthreads(); } __global__ void cpu_fftx_cuda_pre(float *real_image, float *imag_image, int size_x, int size_y, float *cos_term, float *sin_term) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float realInBuffer[SIZEY]; __shared__ float imagInBuffer[SIZEY]; __shared__ float fft_real_s[SIZEY]; __shared__ float fft_imag_s[SIZEY]; // __shared__ float realOutBuffer[SIZEY]; // __shared__ float imagOutBuffer[SIZEY]; // float fft_real = 0; // float fft_imag = 0; // Compute the value for this index float real_value = 0; float imag_value = 0; // float real_mul =0 ; // float imag_mul =0 ; int tx = threadIdx.x; int bx = blockIdx.x * SIZEY; int idx = bx + tx; realInBuffer[tx] = real_image[idx]; imagInBuffer[tx] = imag_image[idx]; __syncthreads(); for(unsigned int n = 0; n < SIZEY; n++) { fft_real_s[tx] = cos_term[n*SIZEY + tx]; fft_imag_s[tx] = sin_term[n*SIZEY + tx]; // __syncthreads(); real_value += (realInBuffer[n] * fft_real_s[tx]) - (imagInBuffer[n] * fft_imag_s[tx]); imag_value += (imagInBuffer[n] * fft_real_s[tx]) + (realInBuffer[n] * fft_imag_s[tx]); // __syncthreads(); } real_image[idx] = real_value; imag_image[idx] = imag_value; // __syncthreads(); } __global__ void cpu_ifftx_cuda_pre(float *real_image, float *imag_image, int size_x, int size_y, float *cos_term, float *sin_term) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float realInBuffer[SIZEY]; __shared__ float imagInBuffer[SIZEY]; __shared__ float fft_real_s[SIZEY]; __shared__ float fft_imag_s[SIZEY]; // __shared__ float realOutBuffer[SIZEY]; // __shared__ float imagOutBuffer[SIZEY]; // float fft_real = 0; // float fft_imag = 0; // Compute the value for this index float real_value = 0; float imag_value = 0; int tx = threadIdx.x; int bx = blockIdx.x * SIZEY; int idx = bx + tx; realInBuffer[tx] = real_image[idx]; imagInBuffer[tx] = imag_image[idx]; __syncthreads(); for(unsigned 
int n = 0; n < SIZEY; n++) { // float term = -2 * PI * threadIdx.x * n / SIZEY; // fft_real = cos(term); // fft_imag = sin(term); fft_real_s[tx] = cos_term[n*SIZEY + tx]; fft_imag_s[tx] = sin_term[n*SIZEY + tx]; // __syncthreads(); real_value += (realInBuffer[n] * fft_real_s[tx]) - (imagInBuffer[n] * fft_imag_s[tx]); imag_value += (imagInBuffer[n] * fft_real_s[tx]) + (realInBuffer[n] * fft_imag_s[tx]); // __syncthreads(); } // Testing // real_value = realInBuffer[tx] * 0.1; // imag_value = imagInBuffer[tx]* 5; // __syncthreads(); // realOutBuffer[threadIdx.x] = real_value; // imagOutBuffer[threadIdx.x] = imag_value; real_image[idx] = real_value/SIZEY; imag_image[idx] = imag_value/SIZEY; // real_image[blockIdx.x*SIZEX + threadIdx.x] = realOutBuffer[threadIdx.x]; // imag_image[blockIdx.x*SIZEX + threadIdx.x] = imagOutBuffer[threadIdx.x]; // __syncthreads(); } __global__ void cpu_ffty_cuda_pre(float *real_image, float *imag_image, int size_x, int size_y, float *cos_term, float *sin_term) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float realInBuffer[SIZEY]; __shared__ float imagInBuffer[SIZEY]; __shared__ float fft_real_s[SIZEY]; __shared__ float fft_imag_s[SIZEY]; // __shared__ float realOutBuffer[SIZEY]; // __shared__ float imagOutBuffer[SIZEY]; // float fft_real = 0; // float fft_imag = 0; // Compute the value for this index float real_value = 0; float imag_value = 0; int tx = threadIdx.x; int bx = blockIdx.x ; int idx = bx + tx*SIZEX; realInBuffer[tx] = real_image[idx]; imagInBuffer[tx] = imag_image[idx]; __syncthreads(); for(unsigned int n = 0; n < SIZEY; n++) { fft_real_s[tx] = cos_term[n*SIZEY + tx]; fft_imag_s[tx] = sin_term[n*SIZEY + tx]; real_value += (realInBuffer[n] * fft_real_s[tx]) - (imagInBuffer[n] * fft_imag_s[tx]); imag_value += (imagInBuffer[n] * fft_real_s[tx]) + (realInBuffer[n] * fft_imag_s[tx]); // __syncthreads(); } real_image[idx] = real_value; imag_image[idx] = imag_value; // __syncthreads(); } __global__ void cpu_iffty_cuda_pre(float *real_image, float *imag_image, int size_x, int size_y, float *cos_term, float *sin_term) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float realInBuffer[SIZEY]; __shared__ float imagInBuffer[SIZEY]; __shared__ float fft_real_s[SIZEY]; __shared__ float fft_imag_s[SIZEY]; // __shared__ float realOutBuffer[SIZEY]; // __shared__ float imagOutBuffer[SIZEY]; // float fft_real = 0; // float fft_imag = 0; // Compute the value for this index float real_value = 0; float imag_value = 0; int tx = threadIdx.x; int bx = blockIdx.x ; int idx = bx + tx*SIZEX; realInBuffer[tx] = real_image[idx]; imagInBuffer[tx] = imag_image[idx]; __syncthreads(); for(unsigned int n = 0; n < SIZEY; n++) { fft_real_s[tx] = cos_term[n*SIZEY + tx]; fft_imag_s[tx] = sin_term[n*SIZEY + tx]; real_value += (realInBuffer[n] * fft_real_s[tx]) - (imagInBuffer[n] * fft_imag_s[tx]); imag_value += (imagInBuffer[n] * fft_real_s[tx]) + (realInBuffer[n] * fft_imag_s[tx]); // __syncthreads(); } real_image[idx] = real_value/SIZEX; imag_image[idx] = imag_value/SIZEX; // __syncthreads(); } //__global__ void cpu_fftx_cuda_map(float *real_image, float *imag_image, int size_x, int size_y, float *real_map, float *imag_map) //{ // __shared__ float fft_real[SIZEX]; // __shared__ float fft_imag[SIZEX]; //} __global__ void cpu_ifftx_cuda(float *real_image, float *imag_image, int size_x, int size_y) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = 
blockIdx.x * blockDim.x + threadIdx.x; __shared__ float realInBuffer[SIZEY]; __shared__ float imagInBuffer[SIZEY]; // Compute the value for this index float real_value = 0; float imag_value = 0; float fft_real; float fft_imag; if(threadIdx.x<SIZEY){ realInBuffer[threadIdx.x] = real_image[blockIdx.x*SIZEX + threadIdx.x]; imagInBuffer[threadIdx.x] = imag_image[blockIdx.x*SIZEX + threadIdx.x]; __syncthreads(); for(unsigned int n = 0; n < SIZEY; n++) { float term = 2 * PI * threadIdx.x * n / SIZEY; fft_real = cos(term); fft_imag = sin(term); real_value += (realInBuffer[n] * fft_real) - (imagInBuffer[n] * fft_imag); imag_value += (imagInBuffer[n] * fft_real) + (realInBuffer[n] * fft_imag); } real_image[blockIdx.x*SIZEX + threadIdx.x] = real_value/SIZEY; imag_image[blockIdx.x*SIZEX + threadIdx.x] = imag_value/SIZEY; } } __global__ void cpu_ffty_cuda(float *real_image, float *imag_image, int size_x, int size_y) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float realInBuffer[SIZEY]; __shared__ float imagInBuffer[SIZEY]; float fft_real; float fft_imag; // Compute the value for this index float real_value = 0; float imag_value = 0; if(threadIdx.x<SIZEX){ realInBuffer[threadIdx.x] = real_image[threadIdx.x*SIZEX + blockIdx.x]; imagInBuffer[threadIdx.x] = imag_image[threadIdx.x*SIZEX + blockIdx.x]; __syncthreads(); for(unsigned int n = 0; n < SIZEX; n++) { float term = -2 * PI * threadIdx.x * n / SIZEX; fft_real = cos(term); fft_imag = sin(term); real_value += (realInBuffer[n] * fft_real) - (imagInBuffer[n] * fft_imag); imag_value += (imagInBuffer[n] * fft_real) + (realInBuffer[n] * fft_imag); } real_image[threadIdx.x*SIZEX + blockIdx.x] = real_value; imag_image[threadIdx.x*SIZEX + blockIdx.x] = imag_value; // __syncthreads(); } } __global__ void cpu_iffty_cuda(float *real_image, float *imag_image, int size_x, int size_y) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float realInBuffer[SIZEY]; __shared__ float imagInBuffer[SIZEY]; float fft_real; float fft_imag; // Compute the value for this index float real_value = 0; float imag_value = 0; if(threadIdx.x<SIZEX){ realInBuffer[threadIdx.x] = real_image[threadIdx.x*SIZEX + blockIdx.x]; imagInBuffer[threadIdx.x] = imag_image[threadIdx.x*SIZEX + blockIdx.x]; __syncthreads(); for(unsigned int n = 0; n < SIZEX; n++) { float term = 2 * PI * threadIdx.x * n / SIZEX; fft_real = cos(term); fft_imag = sin(term); real_value += (realInBuffer[n] * fft_real) - (imagInBuffer[n] * fft_imag); imag_value += (imagInBuffer[n] * fft_real) + (realInBuffer[n] * fft_imag); } real_image[threadIdx.x*SIZEY + blockIdx.x] = real_value/SIZEX; imag_image[threadIdx.x*SIZEY + blockIdx.x] = imag_value/SIZEX; // __syncthreads(); } } __global__ void cpu_filter_cuda(float *real_image, float *imag_image, int size_x, int size_y) { int eightX = size_x/8; int eight7X = size_x - eightX; int eightY = size_y/8; int eight7Y = size_y - eightY; __syncthreads(); if(!(blockIdx.x < eightX && threadIdx.x < eightY) && !(blockIdx.x < eightX && threadIdx.x >= eight7Y) && !(blockIdx.x >= eight7X && threadIdx.x < eightY) && !(blockIdx.x >= eight7X && threadIdx.x >= eight7Y)) { // Zero out these values real_image[threadIdx.x*size_x + blockIdx.x] = 0; imag_image[threadIdx.x*size_x + blockIdx.x] = 0; } __syncthreads(); } //---------------------------------------------------------------- // END ADD KERNEL DEFINTIONS //---------------------------------------------------------------- 
__host__ float filterImage(float *real_image, float *imag_image, int size_x, int size_y) { // check that the sizes match up assert(size_x == SIZEX); assert(size_y == SIZEY); int matSize = size_x * size_y * sizeof(float); // These variables are for timing purposes float transferDown = 0, transferUp = 0, execution = 0; hipEvent_t start,stop; // Custom measurement // hipEvent_t start_me,stop_me; // float fftx = 0, ifftx = 0, filter = 0; CUDA_ERROR_CHECK(hipEventCreate(&start)); CUDA_ERROR_CHECK(hipEventCreate(&stop)); // Create a stream and initialize it hipStream_t filterStream; CUDA_ERROR_CHECK(hipStreamCreate(&filterStream)); // Alloc space on the device float *device_real, *device_imag; CUDA_ERROR_CHECK(hipMalloc((void**)&device_real, matSize)); CUDA_ERROR_CHECK(hipMalloc((void**)&device_imag, matSize)); float *cos_t, *sin_t; CUDA_ERROR_CHECK(hipMalloc((void**)&cos_t, matSize)); CUDA_ERROR_CHECK(hipMalloc((void**)&sin_t, matSize)); // float *real_m, *imag_m; // CUDA_ERROR_CHECK(hipMalloc((void**)&real_m, matSize)); // CUDA_ERROR_CHECK(hipMalloc((void**)&imag_m, matSize)); // Start timing for transfer down CUDA_ERROR_CHECK(hipEventRecord(start,filterStream)); // Here is where we copy matrices down to the device CUDA_ERROR_CHECK(hipMemcpy(device_real,real_image,matSize,hipMemcpyHostToDevice)); CUDA_ERROR_CHECK(hipMemcpy(device_imag,imag_image,matSize,hipMemcpyHostToDevice)); // Stop timing for transfer down CUDA_ERROR_CHECK(hipEventRecord(stop,filterStream)); CUDA_ERROR_CHECK(hipEventSynchronize(stop)); CUDA_ERROR_CHECK(hipEventElapsedTime(&transferDown,start,stop)); // Start timing for the execution CUDA_ERROR_CHECK(hipEventRecord(start,filterStream)); //---------------------------------------------------------------- // TODO: YOU SHOULD PLACE ALL YOUR KERNEL EXECUTIONS // HERE BETWEEN THE CALLS FOR STARTING AND // FINISHING TIMING FOR THE EXECUTION PHASE // BEGIN ADD KERNEL CALLS //---------------------------------------------------------------- // This is an example kernel call, you should feel free to create // as many kernel calls as you feel are needed for your program // Each of the parameters are as follows: // 1. Number of thread blocks, can be either int or dim3 (see CUDA manual) // 2. Number of threads per thread block, can be either int or dim3 (see CUDA manual) // 3. Always should be '0' unless you read the CUDA manual and learn about dynamically allocating shared memory // 4. 
Stream to execute kernel on, should always be 'filterStream' // // Also note that you pass the pointers to the device memory to the kernel call //exampleKernel<<<1,128,0,filterStream>>>(device_real,device_imag,size_x,size_y); // CUDA_ERROR_CHECK(hipEventCreate(&start_me)); // CUDA_ERROR_CHECK(hipEventCreate(&stop_me)); // // cpu_fftx_cuda_map<<<SIZEY,SIZEY,0,filterStream>>>(device_real,device_imag,size_x,size_y,real_m,imag_m); // cpu_fftx_cuda_reduce<<<SIZEY,SIZEX,0,filterStream>>>(device_real,device_imag,size_x,size_y,real_m,imag_m); dim3 fft_dims; fft_dims.x = SIZEY; fft_dims.y = 1; // CUDA_ERROR_CHECK(hipEventRecord(start_me,filterStream)); hipLaunchKernelGGL(( pre_compute), dim3(1),dim3(SIZEY),0,filterStream, cos_t, sin_t); hipLaunchKernelGGL(( cpu_fftx_cuda_pre), dim3(fft_dims),dim3(SIZEY),0,filterStream, device_real,device_imag,size_x,size_y,cos_t,sin_t); // CUDA_ERROR_CHECK(hipEventRecord(stop_me,filterStream)); // CUDA_ERROR_CHECK(hipEventSynchronize(stop_me)); // CUDA_ERROR_CHECK(hipEventElapsedTime(&fftx,start_me,stop_me)); // printf(" Cuda FFTx execution time: %f ms\n", fftx); hipLaunchKernelGGL(( cpu_ffty_cuda_pre), dim3(SIZEX),dim3(SIZEY),0,filterStream, device_real,device_imag,size_x,size_y,cos_t,sin_t); //cpu_ffty_cuda<<<SIZEY,SIZEX,0,filterStream>>>(device_real,device_imag,size_x,size_y); hipLaunchKernelGGL(( cpu_filter_cuda), dim3(SIZEX),dim3(SIZEY),0,filterStream, device_real,device_imag,size_x,size_y); hipLaunchKernelGGL(( pre_compute_i), dim3(1),dim3(SIZEY),0,filterStream, cos_t, sin_t); hipLaunchKernelGGL(( cpu_ifftx_cuda_pre), dim3(SIZEX),dim3(SIZEY),0,filterStream, device_real,device_imag,size_x,size_y,cos_t,sin_t); hipLaunchKernelGGL(( cpu_iffty_cuda_pre), dim3(SIZEX),dim3(SIZEY),0,filterStream, device_real,device_imag,size_x,size_y,cos_t,sin_t); //cpu_ifftx_cuda<<<SIZEX,SIZEY,0,filterStream>>>(device_real,device_imag,size_x,size_y); //cpu_iffty_cuda<<<SIZEY,SIZEX,0,filterStream>>>(device_real,device_imag,size_x,size_y); //---------------------------------------------------------------- // END ADD KERNEL CALLS //---------------------------------------------------------------- // Finish timimg for the execution CUDA_ERROR_CHECK(hipEventRecord(stop,filterStream)); CUDA_ERROR_CHECK(hipEventSynchronize(stop)); CUDA_ERROR_CHECK(hipEventElapsedTime(&execution,start,stop)); // Start timing for the transfer up CUDA_ERROR_CHECK(hipEventRecord(start,filterStream)); // Here is where we copy matrices back from the device CUDA_ERROR_CHECK(hipMemcpy(real_image,device_real,matSize,hipMemcpyDeviceToHost)); CUDA_ERROR_CHECK(hipMemcpy(imag_image,device_imag,matSize,hipMemcpyDeviceToHost)); // Finish timing for transfer up CUDA_ERROR_CHECK(hipEventRecord(stop,filterStream)); CUDA_ERROR_CHECK(hipEventSynchronize(stop)); CUDA_ERROR_CHECK(hipEventElapsedTime(&transferUp,start,stop)); // Synchronize the stream CUDA_ERROR_CHECK(hipStreamSynchronize(filterStream)); // Destroy the stream CUDA_ERROR_CHECK(hipStreamDestroy(filterStream)); // Destroy the events CUDA_ERROR_CHECK(hipEventDestroy(start)); CUDA_ERROR_CHECK(hipEventDestroy(stop)); // Free the memory CUDA_ERROR_CHECK(hipFree(device_real)); CUDA_ERROR_CHECK(hipFree(device_imag)); CUDA_ERROR_CHECK(hipFree(cos_t)); CUDA_ERROR_CHECK(hipFree(sin_t)); // Dump some usage statistics printf("CUDA IMPLEMENTATION STATISTICS:\n"); printf(" Host to Device Transfer Time: %f ms\n", transferDown); printf(" Kernel(s) Execution Time: %f ms\n", execution); printf(" Device to Host Transfer Time: %f ms\n", transferUp); float totalTime = transferDown + 
execution + transferUp; printf(" Total CUDA Execution Time: %f ms\n\n", totalTime); // Return the total time to transfer and execute return totalTime; }
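// cpu_fftx_cuda_pre and cpu_ffty_cuda_pre implement the naive O(N^2) DFT of each row and
// column, X[m] = sum_n x[n] * exp(-2*pi*i*m*n/N), with the twiddle factors precomputed
// into cos_t/sin_t, and cpu_filter_cuda keeps only the four low-frequency 1/8 corners
// before the inverse pass divides by N. A plain host-side reference for one row, handy
// for spot-checking a kernel's output (a sketch, not part of the original file):
#include <cmath>
void reference_row_dft(const float* re_in, const float* im_in,
                       float* re_out, float* im_out, int N)
{
  for (int m = 0; m < N; m++) {
    float rv = 0.0f, iv = 0.0f;
    for (int n = 0; n < N; n++) {
      float term = -2.0f * PI * m * n / N;  // PI as #defined above
      rv += re_in[n] * cosf(term) - im_in[n] * sinf(term);
      iv += im_in[n] * cosf(term) + re_in[n] * sinf(term);
    }
    re_out[m] = rv; im_out[m] = iv;
  }
}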
4b63766811856b0759396c30f79b2ffc90602d01.cu
#include "ImageCleaner.h" #ifndef SIZEX #error Please define SIZEX. #endif #ifndef SIZEY #error Please define SIZEY. #endif #define PI 3.14159256 //---------------------------------------------------------------- // TODO: CREATE NEW KERNELS HERE. YOU CAN PLACE YOUR CALLS TO // THEM IN THE INDICATED SECTION INSIDE THE 'filterImage' // FUNCTION. // // BEGIN ADD KERNEL DEFINITIONS //---------------------------------------------------------------- __global__ void exampleKernel(float *real_image, float *imag_image, int size_x, int size_y) { // Currently does nothing } __global__ void pre_compute(float *cos_term, float *sin_term) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; float fft_real = 0; float fft_imag = 0; int tx = threadIdx.x; __syncthreads(); for(unsigned int n = 0; n < SIZEY; n++) { float term = -2 * PI * tx * n / SIZEY; fft_real = cos(term); fft_imag = sin(term); cos_term[n*SIZEY + tx] = fft_real; sin_term[n*SIZEY + tx] = fft_imag; } __syncthreads(); } __global__ void pre_compute_i(float *cos_term, float *sin_term) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; float fft_real = 0; float fft_imag = 0; int tx = threadIdx.x; __syncthreads(); for(unsigned int n = 0; n < SIZEY; n++) { float term = 2 * PI * tx * n / SIZEY; fft_real = cos(term); fft_imag = sin(term); cos_term[n*SIZEY + tx] = fft_real; sin_term[n*SIZEY + tx] = fft_imag; } __syncthreads(); } __global__ void cpu_fftx_cuda_pre(float *real_image, float *imag_image, int size_x, int size_y, float *cos_term, float *sin_term) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float realInBuffer[SIZEY]; __shared__ float imagInBuffer[SIZEY]; __shared__ float fft_real_s[SIZEY]; __shared__ float fft_imag_s[SIZEY]; // __shared__ float realOutBuffer[SIZEY]; // __shared__ float imagOutBuffer[SIZEY]; // float fft_real = 0; // float fft_imag = 0; // Compute the value for this index float real_value = 0; float imag_value = 0; // float real_mul =0 ; // float imag_mul =0 ; int tx = threadIdx.x; int bx = blockIdx.x * SIZEY; int idx = bx + tx; realInBuffer[tx] = real_image[idx]; imagInBuffer[tx] = imag_image[idx]; __syncthreads(); for(unsigned int n = 0; n < SIZEY; n++) { fft_real_s[tx] = cos_term[n*SIZEY + tx]; fft_imag_s[tx] = sin_term[n*SIZEY + tx]; // __syncthreads(); real_value += (realInBuffer[n] * fft_real_s[tx]) - (imagInBuffer[n] * fft_imag_s[tx]); imag_value += (imagInBuffer[n] * fft_real_s[tx]) + (realInBuffer[n] * fft_imag_s[tx]); // __syncthreads(); } real_image[idx] = real_value; imag_image[idx] = imag_value; // __syncthreads(); } __global__ void cpu_ifftx_cuda_pre(float *real_image, float *imag_image, int size_x, int size_y, float *cos_term, float *sin_term) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float realInBuffer[SIZEY]; __shared__ float imagInBuffer[SIZEY]; __shared__ float fft_real_s[SIZEY]; __shared__ float fft_imag_s[SIZEY]; // __shared__ float realOutBuffer[SIZEY]; // __shared__ float imagOutBuffer[SIZEY]; // float fft_real = 0; // float fft_imag = 0; // Compute the value for this index float real_value = 0; float imag_value = 0; int tx = threadIdx.x; int bx = blockIdx.x * SIZEY; int idx = bx + tx; realInBuffer[tx] = real_image[idx]; imagInBuffer[tx] = imag_image[idx]; __syncthreads(); for(unsigned int n = 0; n < SIZEY; n++) { // float term = -2 * PI * threadIdx.x * n / SIZEY; // 
fft_real = cos(term); // fft_imag = sin(term); fft_real_s[tx] = cos_term[n*SIZEY + tx]; fft_imag_s[tx] = sin_term[n*SIZEY + tx]; // __syncthreads(); real_value += (realInBuffer[n] * fft_real_s[tx]) - (imagInBuffer[n] * fft_imag_s[tx]); imag_value += (imagInBuffer[n] * fft_real_s[tx]) + (realInBuffer[n] * fft_imag_s[tx]); // __syncthreads(); } // Testing // real_value = realInBuffer[tx] * 0.1; // imag_value = imagInBuffer[tx]* 5; // __syncthreads(); // realOutBuffer[threadIdx.x] = real_value; // imagOutBuffer[threadIdx.x] = imag_value; real_image[idx] = real_value/SIZEY; imag_image[idx] = imag_value/SIZEY; // real_image[blockIdx.x*SIZEX + threadIdx.x] = realOutBuffer[threadIdx.x]; // imag_image[blockIdx.x*SIZEX + threadIdx.x] = imagOutBuffer[threadIdx.x]; // __syncthreads(); } __global__ void cpu_ffty_cuda_pre(float *real_image, float *imag_image, int size_x, int size_y, float *cos_term, float *sin_term) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float realInBuffer[SIZEY]; __shared__ float imagInBuffer[SIZEY]; __shared__ float fft_real_s[SIZEY]; __shared__ float fft_imag_s[SIZEY]; // __shared__ float realOutBuffer[SIZEY]; // __shared__ float imagOutBuffer[SIZEY]; // float fft_real = 0; // float fft_imag = 0; // Compute the value for this index float real_value = 0; float imag_value = 0; int tx = threadIdx.x; int bx = blockIdx.x ; int idx = bx + tx*SIZEX; realInBuffer[tx] = real_image[idx]; imagInBuffer[tx] = imag_image[idx]; __syncthreads(); for(unsigned int n = 0; n < SIZEY; n++) { fft_real_s[tx] = cos_term[n*SIZEY + tx]; fft_imag_s[tx] = sin_term[n*SIZEY + tx]; real_value += (realInBuffer[n] * fft_real_s[tx]) - (imagInBuffer[n] * fft_imag_s[tx]); imag_value += (imagInBuffer[n] * fft_real_s[tx]) + (realInBuffer[n] * fft_imag_s[tx]); // __syncthreads(); } real_image[idx] = real_value; imag_image[idx] = imag_value; // __syncthreads(); } __global__ void cpu_iffty_cuda_pre(float *real_image, float *imag_image, int size_x, int size_y, float *cos_term, float *sin_term) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float realInBuffer[SIZEY]; __shared__ float imagInBuffer[SIZEY]; __shared__ float fft_real_s[SIZEY]; __shared__ float fft_imag_s[SIZEY]; // __shared__ float realOutBuffer[SIZEY]; // __shared__ float imagOutBuffer[SIZEY]; // float fft_real = 0; // float fft_imag = 0; // Compute the value for this index float real_value = 0; float imag_value = 0; int tx = threadIdx.x; int bx = blockIdx.x ; int idx = bx + tx*SIZEX; realInBuffer[tx] = real_image[idx]; imagInBuffer[tx] = imag_image[idx]; __syncthreads(); for(unsigned int n = 0; n < SIZEY; n++) { fft_real_s[tx] = cos_term[n*SIZEY + tx]; fft_imag_s[tx] = sin_term[n*SIZEY + tx]; real_value += (realInBuffer[n] * fft_real_s[tx]) - (imagInBuffer[n] * fft_imag_s[tx]); imag_value += (imagInBuffer[n] * fft_real_s[tx]) + (realInBuffer[n] * fft_imag_s[tx]); // __syncthreads(); } real_image[idx] = real_value/SIZEX; imag_image[idx] = imag_value/SIZEX; // __syncthreads(); } //__global__ void cpu_fftx_cuda_map(float *real_image, float *imag_image, int size_x, int size_y, float *real_map, float *imag_map) //{ // __shared__ float fft_real[SIZEX]; // __shared__ float fft_imag[SIZEX]; //} __global__ void cpu_ifftx_cuda(float *real_image, float *imag_image, int size_x, int size_y) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float realInBuffer[SIZEY]; __shared__ 
float imagInBuffer[SIZEY]; // Compute the value for this index float real_value = 0; float imag_value = 0; float fft_real; float fft_imag; if(threadIdx.x<SIZEY){ realInBuffer[threadIdx.x] = real_image[blockIdx.x*SIZEX + threadIdx.x]; imagInBuffer[threadIdx.x] = imag_image[blockIdx.x*SIZEX + threadIdx.x]; __syncthreads(); for(unsigned int n = 0; n < SIZEY; n++) { float term = 2 * PI * threadIdx.x * n / SIZEY; fft_real = cos(term); fft_imag = sin(term); real_value += (realInBuffer[n] * fft_real) - (imagInBuffer[n] * fft_imag); imag_value += (imagInBuffer[n] * fft_real) + (realInBuffer[n] * fft_imag); } real_image[blockIdx.x*SIZEX + threadIdx.x] = real_value/SIZEY; imag_image[blockIdx.x*SIZEX + threadIdx.x] = imag_value/SIZEY; } } __global__ void cpu_ffty_cuda(float *real_image, float *imag_image, int size_x, int size_y) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float realInBuffer[SIZEY]; __shared__ float imagInBuffer[SIZEY]; float fft_real; float fft_imag; // Compute the value for this index float real_value = 0; float imag_value = 0; if(threadIdx.x<SIZEX){ realInBuffer[threadIdx.x] = real_image[threadIdx.x*SIZEX + blockIdx.x]; imagInBuffer[threadIdx.x] = imag_image[threadIdx.x*SIZEX + blockIdx.x]; __syncthreads(); for(unsigned int n = 0; n < SIZEX; n++) { float term = -2 * PI * threadIdx.x * n / SIZEX; fft_real = cos(term); fft_imag = sin(term); real_value += (realInBuffer[n] * fft_real) - (imagInBuffer[n] * fft_imag); imag_value += (imagInBuffer[n] * fft_real) + (realInBuffer[n] * fft_imag); } real_image[threadIdx.x*SIZEX + blockIdx.x] = real_value; imag_image[threadIdx.x*SIZEX + blockIdx.x] = imag_value; // __syncthreads(); } } __global__ void cpu_iffty_cuda(float *real_image, float *imag_image, int size_x, int size_y) { // int BlockIndex = blockIdx.x * blockDim.x; // int index = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float realInBuffer[SIZEY]; __shared__ float imagInBuffer[SIZEY]; float fft_real; float fft_imag; // Compute the value for this index float real_value = 0; float imag_value = 0; if(threadIdx.x<SIZEX){ realInBuffer[threadIdx.x] = real_image[threadIdx.x*SIZEX + blockIdx.x]; imagInBuffer[threadIdx.x] = imag_image[threadIdx.x*SIZEX + blockIdx.x]; __syncthreads(); for(unsigned int n = 0; n < SIZEX; n++) { float term = 2 * PI * threadIdx.x * n / SIZEX; fft_real = cos(term); fft_imag = sin(term); real_value += (realInBuffer[n] * fft_real) - (imagInBuffer[n] * fft_imag); imag_value += (imagInBuffer[n] * fft_real) + (realInBuffer[n] * fft_imag); } real_image[threadIdx.x*SIZEY + blockIdx.x] = real_value/SIZEX; imag_image[threadIdx.x*SIZEY + blockIdx.x] = imag_value/SIZEX; // __syncthreads(); } } __global__ void cpu_filter_cuda(float *real_image, float *imag_image, int size_x, int size_y) { int eightX = size_x/8; int eight7X = size_x - eightX; int eightY = size_y/8; int eight7Y = size_y - eightY; __syncthreads(); if(!(blockIdx.x < eightX && threadIdx.x < eightY) && !(blockIdx.x < eightX && threadIdx.x >= eight7Y) && !(blockIdx.x >= eight7X && threadIdx.x < eightY) && !(blockIdx.x >= eight7X && threadIdx.x >= eight7Y)) { // Zero out these values real_image[threadIdx.x*size_x + blockIdx.x] = 0; imag_image[threadIdx.x*size_x + blockIdx.x] = 0; } __syncthreads(); } //---------------------------------------------------------------- // END ADD KERNEL DEFINTIONS //---------------------------------------------------------------- __host__ float filterImage(float *real_image, float *imag_image, int size_x, int 
size_y) { // check that the sizes match up assert(size_x == SIZEX); assert(size_y == SIZEY); int matSize = size_x * size_y * sizeof(float); // These variables are for timing purposes float transferDown = 0, transferUp = 0, execution = 0; cudaEvent_t start,stop; // Custom measurement // cudaEvent_t start_me,stop_me; // float fftx = 0, ifftx = 0, filter = 0; CUDA_ERROR_CHECK(cudaEventCreate(&start)); CUDA_ERROR_CHECK(cudaEventCreate(&stop)); // Create a stream and initialize it cudaStream_t filterStream; CUDA_ERROR_CHECK(cudaStreamCreate(&filterStream)); // Alloc space on the device float *device_real, *device_imag; CUDA_ERROR_CHECK(cudaMalloc((void**)&device_real, matSize)); CUDA_ERROR_CHECK(cudaMalloc((void**)&device_imag, matSize)); float *cos_t, *sin_t; CUDA_ERROR_CHECK(cudaMalloc((void**)&cos_t, matSize)); CUDA_ERROR_CHECK(cudaMalloc((void**)&sin_t, matSize)); // float *real_m, *imag_m; // CUDA_ERROR_CHECK(cudaMalloc((void**)&real_m, matSize)); // CUDA_ERROR_CHECK(cudaMalloc((void**)&imag_m, matSize)); // Start timing for transfer down CUDA_ERROR_CHECK(cudaEventRecord(start,filterStream)); // Here is where we copy matrices down to the device CUDA_ERROR_CHECK(cudaMemcpy(device_real,real_image,matSize,cudaMemcpyHostToDevice)); CUDA_ERROR_CHECK(cudaMemcpy(device_imag,imag_image,matSize,cudaMemcpyHostToDevice)); // Stop timing for transfer down CUDA_ERROR_CHECK(cudaEventRecord(stop,filterStream)); CUDA_ERROR_CHECK(cudaEventSynchronize(stop)); CUDA_ERROR_CHECK(cudaEventElapsedTime(&transferDown,start,stop)); // Start timing for the execution CUDA_ERROR_CHECK(cudaEventRecord(start,filterStream)); //---------------------------------------------------------------- // TODO: YOU SHOULD PLACE ALL YOUR KERNEL EXECUTIONS // HERE BETWEEN THE CALLS FOR STARTING AND // FINISHING TIMING FOR THE EXECUTION PHASE // BEGIN ADD KERNEL CALLS //---------------------------------------------------------------- // This is an example kernel call, you should feel free to create // as many kernel calls as you feel are needed for your program // Each of the parameters are as follows: // 1. Number of thread blocks, can be either int or dim3 (see CUDA manual) // 2. Number of threads per thread block, can be either int or dim3 (see CUDA manual) // 3. Always should be '0' unless you read the CUDA manual and learn about dynamically allocating shared memory // 4. 
Stream to execute kernel on, should always be 'filterStream' // // Also note that you pass the pointers to the device memory to the kernel call //exampleKernel<<<1,128,0,filterStream>>>(device_real,device_imag,size_x,size_y); // CUDA_ERROR_CHECK(cudaEventCreate(&start_me)); // CUDA_ERROR_CHECK(cudaEventCreate(&stop_me)); // // cpu_fftx_cuda_map<<<SIZEY,SIZEY,0,filterStream>>>(device_real,device_imag,size_x,size_y,real_m,imag_m); // cpu_fftx_cuda_reduce<<<SIZEY,SIZEX,0,filterStream>>>(device_real,device_imag,size_x,size_y,real_m,imag_m); dim3 fft_dims; fft_dims.x = SIZEY; fft_dims.y = 1; // CUDA_ERROR_CHECK(cudaEventRecord(start_me,filterStream)); pre_compute<<<1,SIZEY,0,filterStream>>>(cos_t, sin_t); cpu_fftx_cuda_pre<<<fft_dims,SIZEY,0,filterStream>>>(device_real,device_imag,size_x,size_y,cos_t,sin_t); // CUDA_ERROR_CHECK(cudaEventRecord(stop_me,filterStream)); // CUDA_ERROR_CHECK(cudaEventSynchronize(stop_me)); // CUDA_ERROR_CHECK(cudaEventElapsedTime(&fftx,start_me,stop_me)); // printf(" Cuda FFTx execution time: %f ms\n", fftx); cpu_ffty_cuda_pre<<<SIZEX,SIZEY,0,filterStream>>>(device_real,device_imag,size_x,size_y,cos_t,sin_t); //cpu_ffty_cuda<<<SIZEY,SIZEX,0,filterStream>>>(device_real,device_imag,size_x,size_y); cpu_filter_cuda<<<SIZEX,SIZEY,0,filterStream>>>(device_real,device_imag,size_x,size_y); pre_compute_i<<<1,SIZEY,0,filterStream>>>(cos_t, sin_t); cpu_ifftx_cuda_pre<<<SIZEX,SIZEY,0,filterStream>>>(device_real,device_imag,size_x,size_y,cos_t,sin_t); cpu_iffty_cuda_pre<<<SIZEX,SIZEY,0,filterStream>>>(device_real,device_imag,size_x,size_y,cos_t,sin_t); //cpu_ifftx_cuda<<<SIZEX,SIZEY,0,filterStream>>>(device_real,device_imag,size_x,size_y); //cpu_iffty_cuda<<<SIZEY,SIZEX,0,filterStream>>>(device_real,device_imag,size_x,size_y); //---------------------------------------------------------------- // END ADD KERNEL CALLS //---------------------------------------------------------------- // Finish timimg for the execution CUDA_ERROR_CHECK(cudaEventRecord(stop,filterStream)); CUDA_ERROR_CHECK(cudaEventSynchronize(stop)); CUDA_ERROR_CHECK(cudaEventElapsedTime(&execution,start,stop)); // Start timing for the transfer up CUDA_ERROR_CHECK(cudaEventRecord(start,filterStream)); // Here is where we copy matrices back from the device CUDA_ERROR_CHECK(cudaMemcpy(real_image,device_real,matSize,cudaMemcpyDeviceToHost)); CUDA_ERROR_CHECK(cudaMemcpy(imag_image,device_imag,matSize,cudaMemcpyDeviceToHost)); // Finish timing for transfer up CUDA_ERROR_CHECK(cudaEventRecord(stop,filterStream)); CUDA_ERROR_CHECK(cudaEventSynchronize(stop)); CUDA_ERROR_CHECK(cudaEventElapsedTime(&transferUp,start,stop)); // Synchronize the stream CUDA_ERROR_CHECK(cudaStreamSynchronize(filterStream)); // Destroy the stream CUDA_ERROR_CHECK(cudaStreamDestroy(filterStream)); // Destroy the events CUDA_ERROR_CHECK(cudaEventDestroy(start)); CUDA_ERROR_CHECK(cudaEventDestroy(stop)); // Free the memory CUDA_ERROR_CHECK(cudaFree(device_real)); CUDA_ERROR_CHECK(cudaFree(device_imag)); CUDA_ERROR_CHECK(cudaFree(cos_t)); CUDA_ERROR_CHECK(cudaFree(sin_t)); // Dump some usage statistics printf("CUDA IMPLEMENTATION STATISTICS:\n"); printf(" Host to Device Transfer Time: %f ms\n", transferDown); printf(" Kernel(s) Execution Time: %f ms\n", execution); printf(" Device to Host Transfer Time: %f ms\n", transferUp); float totalTime = transferDown + execution + transferUp; printf(" Total CUDA Execution Time: %f ms\n\n", totalTime); // Return the total time to transfer and execute return totalTime; }
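// A minimal CPU reference sketch of the row-wise DFT that pre_compute and
// cpu_fftx_cuda_pre above evaluate on the GPU: output bin k of a row is the
// sum over n of (real + i*imag)[n] * e^(-2*pi*i*k*n/size_y), which is exactly
// the cos/sin table the pre_compute kernel stores in cos_term/sin_term.
// The helper name dft_row_reference and the double accumulators are
// illustrative assumptions, not part of the original file.
#include <math.h>

static void dft_row_reference(const float *real_in, const float *imag_in,
                              float *real_out, float *imag_out, int size_y)
{
  const double PI_REF = 3.14159265358979323846;
  for (int k = 0; k < size_y; k++) {
    double real_acc = 0.0, imag_acc = 0.0;
    for (int n = 0; n < size_y; n++) {
      double term = -2.0 * PI_REF * k * n / size_y;
      double c = cos(term), s = sin(term);
      // Complex multiply-accumulate, matching the kernel's two accumulators.
      real_acc += real_in[n] * c - imag_in[n] * s;
      imag_acc += imag_in[n] * c + real_in[n] * s;
    }
    real_out[k] = (float)real_acc;
    imag_out[k] = (float)imag_acc;
  }
}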
b67ae25a9f82e817758c372ced569df406dd605e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include <hipcub/hipcub.hpp> #include <iomanip> #include "caffe/FRCNN/frcnn_proposal_layer.hpp" #include "caffe/FRCNN/util/frcnn_utils.hpp" #include "caffe/FRCNN/util/frcnn_helper.hpp" #include "caffe/FRCNN/util/frcnn_param.hpp" #include "caffe/FRCNN/util/frcnn_gpu_nms.hpp" namespace caffe { namespace Frcnn { using std::vector; __global__ void GetIndex(const int n,int *indices){ CUDA_KERNEL_LOOP(index , n){ indices[index] = index; } } template <typename Dtype> __global__ void BBoxTransformInv(const int nthreads, const Dtype* const bottom_rpn_bbox, const int height, const int width, const int feat_stride, const int im_height, const int im_width, const int* sorted_indices, const float* anchors, float* const transform_bbox) { CUDA_KERNEL_LOOP(index , nthreads) { const int score_idx = sorted_indices[index]; const int i = score_idx % width; // width const int j = (score_idx % (width * height)) / width; // height const int k = score_idx / (width * height); // channel float *box = transform_bbox + index * 4; box[0] = anchors[k * 4 + 0] + i * feat_stride; box[1] = anchors[k * 4 + 1] + j * feat_stride; box[2] = anchors[k * 4 + 2] + i * feat_stride; box[3] = anchors[k * 4 + 3] + j * feat_stride; const Dtype det[4] = { bottom_rpn_bbox[(k * 4 + 0) * height * width + j * width + i], bottom_rpn_bbox[(k * 4 + 1) * height * width + j * width + i], bottom_rpn_bbox[(k * 4 + 2) * height * width + j * width + i], bottom_rpn_bbox[(k * 4 + 3) * height * width + j * width + i] }; float src_w = box[2] - box[0] + 1; float src_h = box[3] - box[1] + 1; float src_ctr_x = box[0] + 0.5 * src_w; float src_ctr_y = box[1] + 0.5 * src_h; float pred_ctr_x = det[0] * src_w + src_ctr_x; float pred_ctr_y = det[1] * src_h + src_ctr_y; float pred_w = exp(det[2]) * src_w; float pred_h = exp(det[3]) * src_h; box[0] = pred_ctr_x - 0.5 * pred_w; box[1] = pred_ctr_y - 0.5 * pred_h; box[2] = pred_ctr_x + 0.5 * pred_w; box[3] = pred_ctr_y + 0.5 * pred_h; box[0] = max(0.0f, min(box[0], im_width - 1.0)); box[1] = max(0.0f, min(box[1], im_height - 1.0)); box[2] = max(0.0f, min(box[2], im_width - 1.0)); box[3] = max(0.0f, min(box[3], im_height - 1.0)); } } __global__ void SelectBox(const int nthreads, const float *box, float min_size, int *flags) { CUDA_KERNEL_LOOP(index , nthreads) { if ((box[index * 4 + 2] - box[index * 4 + 0] < min_size) || (box[index * 4 + 3] - box[index * 4 + 1] < min_size)) { flags[index] = 0; } else { flags[index] = 1; } } } template <typename Dtype> __global__ void SelectBoxByIndices(const int nthreads, const float *in_box, int *selected_indices, float *out_box, const Dtype *in_score, Dtype *out_score) { CUDA_KERNEL_LOOP(index , nthreads) { if ((index == 0 && selected_indices[index] == 1) || (index > 0 && selected_indices[index] == selected_indices[index - 1] + 1)) { out_box[(selected_indices[index] - 1) * 4 + 0] = in_box[index * 4 + 0]; out_box[(selected_indices[index] - 1) * 4 + 1] = in_box[index * 4 + 1]; out_box[(selected_indices[index] - 1) * 4 + 2] = in_box[index * 4 + 2]; out_box[(selected_indices[index] - 1) * 4 + 3] = in_box[index * 4 + 3]; if (in_score!=NULL && out_score!=NULL) { out_score[selected_indices[index] - 1] = in_score[index]; } } 
} } template <typename Dtype> __global__ void SelectBoxAftNMS(const int nthreads, const float *in_box, int *keep_indices, Dtype *top_data, const Dtype *in_score, Dtype* top_score) { CUDA_KERNEL_LOOP(index , nthreads) { top_data[index * 5] = 0; int keep_idx = keep_indices[index]; for (int j = 1; j < 5; ++j) { top_data[index * 5 + j] = in_box[keep_idx * 4 + j - 1]; } if (top_score != NULL && in_score != NULL) { top_score[index] = in_score[keep_idx]; } } } template <typename Dtype> void FrcnnProposalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { // Forward_cpu(bottom, top); // return ; #if 1 DLOG(ERROR) << "========== enter proposal layer"; const Dtype *bottom_rpn_score = bottom[0]->gpu_data(); const Dtype *bottom_rpn_bbox = bottom[1]->gpu_data(); // bottom data comes from host memory Dtype bottom_im_info[3]; CHECK_EQ(bottom[2]->count(), 3); CUDA_CHECK(hipMemcpy(bottom_im_info, bottom[2]->gpu_data(), sizeof(Dtype) * 3, hipMemcpyDeviceToHost)); const int num = bottom[1]->num(); const int channes = bottom[1]->channels(); const int height = bottom[1]->height(); const int width = bottom[1]->width(); CHECK(num == 1) << "only single item batches are supported"; CHECK(channes % 4 == 0) << "rpn bbox pred channels should be divided by 4"; const float im_height = bottom_im_info[0]; const float im_width = bottom_im_info[1]; int rpn_pre_nms_top_n; int rpn_post_nms_top_n; float rpn_nms_thresh; int rpn_min_size; if (this->phase_ == TRAIN) { rpn_pre_nms_top_n = FrcnnParam::rpn_pre_nms_top_n; rpn_post_nms_top_n = FrcnnParam::rpn_post_nms_top_n; rpn_nms_thresh = FrcnnParam::rpn_nms_thresh; rpn_min_size = FrcnnParam::rpn_min_size; } else { rpn_pre_nms_top_n = FrcnnParam::test_rpn_pre_nms_top_n; rpn_post_nms_top_n = FrcnnParam::test_rpn_post_nms_top_n; rpn_nms_thresh = FrcnnParam::test_rpn_nms_thresh; rpn_min_size = FrcnnParam::test_rpn_min_size; } LOG_IF(ERROR, rpn_pre_nms_top_n <= 0 ) << "rpn_pre_nms_top_n : " << rpn_pre_nms_top_n; LOG_IF(ERROR, rpn_post_nms_top_n <= 0 ) << "rpn_post_nms_top_n : " << rpn_post_nms_top_n; if (rpn_pre_nms_top_n <= 0 || rpn_post_nms_top_n <= 0 ) return; const int config_n_anchors = FrcnnParam::anchors.size() / 4; const int total_anchor_num = config_n_anchors * height * width; //Step 1. 
-------------------------------Sort the rpn result---------------------- // the first half of rpn_score is the bg score // Note that the sorting operator will change the order fg_scores (bottom_rpn_score) Dtype *fg_scores = (Dtype*)(&bottom_rpn_score[total_anchor_num]); Dtype *sorted_scores = NULL; CUDA_CHECK(hipMalloc((void**)&sorted_scores, sizeof(Dtype) * total_anchor_num)); cub::DoubleBuffer<Dtype> d_keys(fg_scores, sorted_scores); int *indices = NULL; CUDA_CHECK(hipMalloc((void**)&indices, sizeof(int) * total_anchor_num)); hipLaunchKernelGGL(( GetIndex), dim3(caffe::CAFFE_GET_BLOCKS(total_anchor_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0, total_anchor_num, indices); hipDeviceSynchronize(); int *sorted_indices = NULL; CUDA_CHECK(hipMalloc((void**)&sorted_indices, sizeof(int) * total_anchor_num)); cub::DoubleBuffer<int> d_values(indices, sorted_indices); void *sort_temp_storage_ = NULL; size_t sort_temp_storage_bytes_ = 0; // calculate the temp_storage_bytes hipcub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_, d_keys, d_values, total_anchor_num); DLOG(ERROR) << "sort_temp_storage_bytes_ : " << sort_temp_storage_bytes_; CUDA_CHECK(hipMalloc(&sort_temp_storage_, sort_temp_storage_bytes_)); // sorting hipcub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_, d_keys, d_values, total_anchor_num); hipDeviceSynchronize(); //Step 2. ---------------------------bbox transform---------------------------- const int retained_anchor_num = ::min(total_anchor_num, rpn_pre_nms_top_n); // float *transform_bbox = NULL; // CUDA_CHECK(hipMalloc(&transform_bbox, sizeof(float) * retained_anchor_num * 4)); hipLaunchKernelGGL(( BBoxTransformInv<Dtype>), dim3(caffe::CAFFE_GET_BLOCKS(retained_anchor_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0, retained_anchor_num, bottom_rpn_bbox, height, width, FrcnnParam::feat_stride, im_height, im_width, sorted_indices, anchors_, transform_bbox_); hipDeviceSynchronize(); //Step 3. 
-------------------------filter out small box----------------------- // select the box larger than min size // int *selected_flags = NULL; // CUDA_CHECK(hipMalloc(&selected_flags, sizeof(int) * retained_anchor_num)); hipLaunchKernelGGL(( SelectBox), dim3(caffe::CAFFE_GET_BLOCKS(retained_anchor_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0, retained_anchor_num, transform_bbox_, bottom_im_info[2] * rpn_min_size, selected_flags_); hipDeviceSynchronize(); // cumulative sum up the flags to get the copy index int *selected_indices_ = NULL; CUDA_CHECK(hipMalloc((void**)&selected_indices_, sizeof(int) * retained_anchor_num)); void *cumsum_temp_storage_ = NULL; size_t cumsum_temp_storage_bytes_ = 0; hipcub::DeviceScan::InclusiveSum(cumsum_temp_storage_, cumsum_temp_storage_bytes_, selected_flags_, selected_indices_, retained_anchor_num); DLOG(ERROR) << "cumsum_temp_storage_bytes : " << cumsum_temp_storage_bytes_; CUDA_CHECK(hipMalloc(&cumsum_temp_storage_, cumsum_temp_storage_bytes_)); hipcub::DeviceScan::InclusiveSum(sort_temp_storage_, cumsum_temp_storage_bytes_, selected_flags_, selected_indices_, retained_anchor_num); // CUDA_CHECK(hipFree(cumsum_temp_storage)); int selected_num = -1; hipMemcpy(&selected_num, &selected_indices_[retained_anchor_num - 1], sizeof(int), hipMemcpyDeviceToHost); CHECK_GT(selected_num, 0); Dtype *bbox_score_ = NULL; if (top.size() == 2) CUDA_CHECK(hipMalloc(&bbox_score_, sizeof(Dtype) * retained_anchor_num)); hipLaunchKernelGGL(( SelectBoxByIndices), dim3(caffe::CAFFE_GET_BLOCKS(selected_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0, selected_num, transform_bbox_, selected_indices_, transform_bbox_, sorted_scores, bbox_score_); hipDeviceSynchronize(); //Step 4. -----------------------------apply nms------------------------------- DLOG(ERROR) << "========== apply nms with rpn_nms_thresh : " << rpn_nms_thresh; vector<int> keep_indices(selected_num); int keep_num = -1; gpu_nms(&keep_indices[0], &keep_num, transform_bbox_, selected_num, 4, rpn_nms_thresh); DLOG(ERROR) << "rpn num after gpu nms: " << keep_num; keep_num = ::min(keep_num, rpn_post_nms_top_n); DLOG(ERROR) << "========== copy to top"; hipMemcpy(gpu_keep_indices_, &keep_indices[0], sizeof(int) * keep_num, hipMemcpyHostToDevice); top[0]->Reshape(keep_num, 5, 1, 1); Dtype *top_data = top[0]->mutable_gpu_data(); Dtype *top_score = NULL; if (top.size() == 2) { top[1]->Reshape(keep_num, 1, 1, 1); top_score = top[1]->mutable_gpu_data(); } hipLaunchKernelGGL(( SelectBoxAftNMS), dim3(caffe::CAFFE_GET_BLOCKS(keep_num)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, 0, keep_num, transform_bbox_, gpu_keep_indices_, top_data, bbox_score_, top_score); DLOG(ERROR) << "========== exit proposal layer"; //////////////////////////////////// // do not forget to free the malloc memory CUDA_CHECK(hipFree(sorted_scores)); CUDA_CHECK(hipFree(indices)); CUDA_CHECK(hipFree(sorted_indices)); CUDA_CHECK(hipFree(sort_temp_storage_)); CUDA_CHECK(hipFree(cumsum_temp_storage_)); CUDA_CHECK(hipFree(selected_indices_)); if (bbox_score_!=NULL) CUDA_CHECK(hipFree(bbox_score_)); #endif } template <typename Dtype> void FrcnnProposalLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) { for (int i = 0; i < propagate_down.size(); ++i) { if (propagate_down[i]) { NOT_IMPLEMENTED; } } } INSTANTIATE_LAYER_GPU_FUNCS(FrcnnProposalLayer); } // namespace frcnn } // namespace caffe
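// A minimal host-side sketch of the per-anchor box decoding performed by
// BBoxTransformInv above: the regression output (dx, dy, dw, dh) shifts the
// anchor center and rescales its width/height in log space, and the decoded
// corners are clipped to the image. decode_box is a hypothetical helper name
// used only for illustration.
#include <math.h>

static void decode_box(const float anchor[4], const float det[4],
                       float im_width, float im_height, float box[4])
{
  float src_w = anchor[2] - anchor[0] + 1.0f;
  float src_h = anchor[3] - anchor[1] + 1.0f;
  float ctr_x = anchor[0] + 0.5f * src_w;
  float ctr_y = anchor[1] + 0.5f * src_h;

  float pred_ctr_x = det[0] * src_w + ctr_x;   // dx, dy scale with anchor size
  float pred_ctr_y = det[1] * src_h + ctr_y;
  float pred_w = expf(det[2]) * src_w;         // dw, dh are log-space scales
  float pred_h = expf(det[3]) * src_h;

  box[0] = pred_ctr_x - 0.5f * pred_w;
  box[1] = pred_ctr_y - 0.5f * pred_h;
  box[2] = pred_ctr_x + 0.5f * pred_w;
  box[3] = pred_ctr_y + 0.5f * pred_h;
  for (int i = 0; i < 4; ++i) {                // clip to the image bounds
    float limit = (i % 2 == 0) ? im_width - 1.0f : im_height - 1.0f;
    box[i] = fmaxf(0.0f, fminf(box[i], limit));
  }
}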
b67ae25a9f82e817758c372ced569df406dd605e.cu
// ------------------------------------------------------------------ // Fast R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License [see fast-rcnn/LICENSE for details] // Written by Ross Girshick // ------------------------------------------------------------------ #include <cub/cub.cuh> #include <iomanip> #include "caffe/FRCNN/frcnn_proposal_layer.hpp" #include "caffe/FRCNN/util/frcnn_utils.hpp" #include "caffe/FRCNN/util/frcnn_helper.hpp" #include "caffe/FRCNN/util/frcnn_param.hpp" #include "caffe/FRCNN/util/frcnn_gpu_nms.hpp" namespace caffe { namespace Frcnn { using std::vector; __global__ void GetIndex(const int n,int *indices){ CUDA_KERNEL_LOOP(index , n){ indices[index] = index; } } template <typename Dtype> __global__ void BBoxTransformInv(const int nthreads, const Dtype* const bottom_rpn_bbox, const int height, const int width, const int feat_stride, const int im_height, const int im_width, const int* sorted_indices, const float* anchors, float* const transform_bbox) { CUDA_KERNEL_LOOP(index , nthreads) { const int score_idx = sorted_indices[index]; const int i = score_idx % width; // width const int j = (score_idx % (width * height)) / width; // height const int k = score_idx / (width * height); // channel float *box = transform_bbox + index * 4; box[0] = anchors[k * 4 + 0] + i * feat_stride; box[1] = anchors[k * 4 + 1] + j * feat_stride; box[2] = anchors[k * 4 + 2] + i * feat_stride; box[3] = anchors[k * 4 + 3] + j * feat_stride; const Dtype det[4] = { bottom_rpn_bbox[(k * 4 + 0) * height * width + j * width + i], bottom_rpn_bbox[(k * 4 + 1) * height * width + j * width + i], bottom_rpn_bbox[(k * 4 + 2) * height * width + j * width + i], bottom_rpn_bbox[(k * 4 + 3) * height * width + j * width + i] }; float src_w = box[2] - box[0] + 1; float src_h = box[3] - box[1] + 1; float src_ctr_x = box[0] + 0.5 * src_w; float src_ctr_y = box[1] + 0.5 * src_h; float pred_ctr_x = det[0] * src_w + src_ctr_x; float pred_ctr_y = det[1] * src_h + src_ctr_y; float pred_w = exp(det[2]) * src_w; float pred_h = exp(det[3]) * src_h; box[0] = pred_ctr_x - 0.5 * pred_w; box[1] = pred_ctr_y - 0.5 * pred_h; box[2] = pred_ctr_x + 0.5 * pred_w; box[3] = pred_ctr_y + 0.5 * pred_h; box[0] = max(0.0f, min(box[0], im_width - 1.0)); box[1] = max(0.0f, min(box[1], im_height - 1.0)); box[2] = max(0.0f, min(box[2], im_width - 1.0)); box[3] = max(0.0f, min(box[3], im_height - 1.0)); } } __global__ void SelectBox(const int nthreads, const float *box, float min_size, int *flags) { CUDA_KERNEL_LOOP(index , nthreads) { if ((box[index * 4 + 2] - box[index * 4 + 0] < min_size) || (box[index * 4 + 3] - box[index * 4 + 1] < min_size)) { flags[index] = 0; } else { flags[index] = 1; } } } template <typename Dtype> __global__ void SelectBoxByIndices(const int nthreads, const float *in_box, int *selected_indices, float *out_box, const Dtype *in_score, Dtype *out_score) { CUDA_KERNEL_LOOP(index , nthreads) { if ((index == 0 && selected_indices[index] == 1) || (index > 0 && selected_indices[index] == selected_indices[index - 1] + 1)) { out_box[(selected_indices[index] - 1) * 4 + 0] = in_box[index * 4 + 0]; out_box[(selected_indices[index] - 1) * 4 + 1] = in_box[index * 4 + 1]; out_box[(selected_indices[index] - 1) * 4 + 2] = in_box[index * 4 + 2]; out_box[(selected_indices[index] - 1) * 4 + 3] = in_box[index * 4 + 3]; if (in_score!=NULL && out_score!=NULL) { out_score[selected_indices[index] - 1] = in_score[index]; } } } } template <typename Dtype> __global__ void SelectBoxAftNMS(const int nthreads, const float 
*in_box, int *keep_indices, Dtype *top_data, const Dtype *in_score, Dtype* top_score) { CUDA_KERNEL_LOOP(index , nthreads) { top_data[index * 5] = 0; int keep_idx = keep_indices[index]; for (int j = 1; j < 5; ++j) { top_data[index * 5 + j] = in_box[keep_idx * 4 + j - 1]; } if (top_score != NULL && in_score != NULL) { top_score[index] = in_score[keep_idx]; } } } template <typename Dtype> void FrcnnProposalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype> *> &bottom, const vector<Blob<Dtype> *> &top) { // Forward_cpu(bottom, top); // return ; #if 1 DLOG(ERROR) << "========== enter proposal layer"; const Dtype *bottom_rpn_score = bottom[0]->gpu_data(); const Dtype *bottom_rpn_bbox = bottom[1]->gpu_data(); // bottom data comes from host memory Dtype bottom_im_info[3]; CHECK_EQ(bottom[2]->count(), 3); CUDA_CHECK(cudaMemcpy(bottom_im_info, bottom[2]->gpu_data(), sizeof(Dtype) * 3, cudaMemcpyDeviceToHost)); const int num = bottom[1]->num(); const int channes = bottom[1]->channels(); const int height = bottom[1]->height(); const int width = bottom[1]->width(); CHECK(num == 1) << "only single item batches are supported"; CHECK(channes % 4 == 0) << "rpn bbox pred channels should be divided by 4"; const float im_height = bottom_im_info[0]; const float im_width = bottom_im_info[1]; int rpn_pre_nms_top_n; int rpn_post_nms_top_n; float rpn_nms_thresh; int rpn_min_size; if (this->phase_ == TRAIN) { rpn_pre_nms_top_n = FrcnnParam::rpn_pre_nms_top_n; rpn_post_nms_top_n = FrcnnParam::rpn_post_nms_top_n; rpn_nms_thresh = FrcnnParam::rpn_nms_thresh; rpn_min_size = FrcnnParam::rpn_min_size; } else { rpn_pre_nms_top_n = FrcnnParam::test_rpn_pre_nms_top_n; rpn_post_nms_top_n = FrcnnParam::test_rpn_post_nms_top_n; rpn_nms_thresh = FrcnnParam::test_rpn_nms_thresh; rpn_min_size = FrcnnParam::test_rpn_min_size; } LOG_IF(ERROR, rpn_pre_nms_top_n <= 0 ) << "rpn_pre_nms_top_n : " << rpn_pre_nms_top_n; LOG_IF(ERROR, rpn_post_nms_top_n <= 0 ) << "rpn_post_nms_top_n : " << rpn_post_nms_top_n; if (rpn_pre_nms_top_n <= 0 || rpn_post_nms_top_n <= 0 ) return; const int config_n_anchors = FrcnnParam::anchors.size() / 4; const int total_anchor_num = config_n_anchors * height * width; //Step 1. 
-------------------------------Sort the rpn result---------------------- // the first half of rpn_score is the bg score // Note that the sorting operator will change the order fg_scores (bottom_rpn_score) Dtype *fg_scores = (Dtype*)(&bottom_rpn_score[total_anchor_num]); Dtype *sorted_scores = NULL; CUDA_CHECK(cudaMalloc((void**)&sorted_scores, sizeof(Dtype) * total_anchor_num)); cub::DoubleBuffer<Dtype> d_keys(fg_scores, sorted_scores); int *indices = NULL; CUDA_CHECK(cudaMalloc((void**)&indices, sizeof(int) * total_anchor_num)); GetIndex<<<caffe::CAFFE_GET_BLOCKS(total_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS>>>( total_anchor_num, indices); cudaDeviceSynchronize(); int *sorted_indices = NULL; CUDA_CHECK(cudaMalloc((void**)&sorted_indices, sizeof(int) * total_anchor_num)); cub::DoubleBuffer<int> d_values(indices, sorted_indices); void *sort_temp_storage_ = NULL; size_t sort_temp_storage_bytes_ = 0; // calculate the temp_storage_bytes cub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_, d_keys, d_values, total_anchor_num); DLOG(ERROR) << "sort_temp_storage_bytes_ : " << sort_temp_storage_bytes_; CUDA_CHECK(cudaMalloc(&sort_temp_storage_, sort_temp_storage_bytes_)); // sorting cub::DeviceRadixSort::SortPairsDescending(sort_temp_storage_, sort_temp_storage_bytes_, d_keys, d_values, total_anchor_num); cudaDeviceSynchronize(); //Step 2. ---------------------------bbox transform---------------------------- const int retained_anchor_num = std::min(total_anchor_num, rpn_pre_nms_top_n); // float *transform_bbox = NULL; // CUDA_CHECK(cudaMalloc(&transform_bbox, sizeof(float) * retained_anchor_num * 4)); BBoxTransformInv<Dtype><<<caffe::CAFFE_GET_BLOCKS(retained_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS>>>( retained_anchor_num, bottom_rpn_bbox, height, width, FrcnnParam::feat_stride, im_height, im_width, sorted_indices, anchors_, transform_bbox_); cudaDeviceSynchronize(); //Step 3. 
-------------------------filter out small box----------------------- // select the box larger than min size // int *selected_flags = NULL; // CUDA_CHECK(cudaMalloc(&selected_flags, sizeof(int) * retained_anchor_num)); SelectBox<<<caffe::CAFFE_GET_BLOCKS(retained_anchor_num), caffe::CAFFE_CUDA_NUM_THREADS>>>( retained_anchor_num, transform_bbox_, bottom_im_info[2] * rpn_min_size, selected_flags_); cudaDeviceSynchronize(); // cumulative sum up the flags to get the copy index int *selected_indices_ = NULL; CUDA_CHECK(cudaMalloc((void**)&selected_indices_, sizeof(int) * retained_anchor_num)); void *cumsum_temp_storage_ = NULL; size_t cumsum_temp_storage_bytes_ = 0; cub::DeviceScan::InclusiveSum(cumsum_temp_storage_, cumsum_temp_storage_bytes_, selected_flags_, selected_indices_, retained_anchor_num); DLOG(ERROR) << "cumsum_temp_storage_bytes : " << cumsum_temp_storage_bytes_; CUDA_CHECK(cudaMalloc(&cumsum_temp_storage_, cumsum_temp_storage_bytes_)); cub::DeviceScan::InclusiveSum(sort_temp_storage_, cumsum_temp_storage_bytes_, selected_flags_, selected_indices_, retained_anchor_num); // CUDA_CHECK(cudaFree(cumsum_temp_storage)); int selected_num = -1; cudaMemcpy(&selected_num, &selected_indices_[retained_anchor_num - 1], sizeof(int), cudaMemcpyDeviceToHost); CHECK_GT(selected_num, 0); Dtype *bbox_score_ = NULL; if (top.size() == 2) CUDA_CHECK(cudaMalloc(&bbox_score_, sizeof(Dtype) * retained_anchor_num)); SelectBoxByIndices<<<caffe::CAFFE_GET_BLOCKS(selected_num), caffe::CAFFE_CUDA_NUM_THREADS>>>( selected_num, transform_bbox_, selected_indices_, transform_bbox_, sorted_scores, bbox_score_); cudaDeviceSynchronize(); //Step 4. -----------------------------apply nms------------------------------- DLOG(ERROR) << "========== apply nms with rpn_nms_thresh : " << rpn_nms_thresh; vector<int> keep_indices(selected_num); int keep_num = -1; gpu_nms(&keep_indices[0], &keep_num, transform_bbox_, selected_num, 4, rpn_nms_thresh); DLOG(ERROR) << "rpn num after gpu nms: " << keep_num; keep_num = std::min(keep_num, rpn_post_nms_top_n); DLOG(ERROR) << "========== copy to top"; cudaMemcpy(gpu_keep_indices_, &keep_indices[0], sizeof(int) * keep_num, cudaMemcpyHostToDevice); top[0]->Reshape(keep_num, 5, 1, 1); Dtype *top_data = top[0]->mutable_gpu_data(); Dtype *top_score = NULL; if (top.size() == 2) { top[1]->Reshape(keep_num, 1, 1, 1); top_score = top[1]->mutable_gpu_data(); } SelectBoxAftNMS<<<caffe::CAFFE_GET_BLOCKS(keep_num), caffe::CAFFE_CUDA_NUM_THREADS>>>( keep_num, transform_bbox_, gpu_keep_indices_, top_data, bbox_score_, top_score); DLOG(ERROR) << "========== exit proposal layer"; //////////////////////////////////// // do not forget to free the malloc memory CUDA_CHECK(cudaFree(sorted_scores)); CUDA_CHECK(cudaFree(indices)); CUDA_CHECK(cudaFree(sorted_indices)); CUDA_CHECK(cudaFree(sort_temp_storage_)); CUDA_CHECK(cudaFree(cumsum_temp_storage_)); CUDA_CHECK(cudaFree(selected_indices_)); if (bbox_score_!=NULL) CUDA_CHECK(cudaFree(bbox_score_)); #endif } template <typename Dtype> void FrcnnProposalLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> &top, const vector<bool> &propagate_down, const vector<Blob<Dtype> *> &bottom) { for (int i = 0; i < propagate_down.size(); ++i) { if (propagate_down[i]) { NOT_IMPLEMENTED; } } } INSTANTIATE_LAYER_GPU_FUNCS(FrcnnProposalLayer); } // namespace frcnn } // namespace caffe
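// A standalone sketch of the two-phase cub::DeviceRadixSort idiom that
// Forward_gpu above relies on: the first call with a NULL temp-storage pointer
// only reports the scratch size, the second call performs the descending sort,
// and DoubleBuffer::Current() says which of the two buffers holds the result.
// (DeviceScan::InclusiveSum in the layer follows the same query-then-run
// pattern.) Buffer names below are illustrative and error checks are omitted.
#include <cub/cub.cuh>

void sort_scores_desc(float *d_keys_a, float *d_keys_b,
                      int *d_vals_a, int *d_vals_b, int num_items)
{
  cub::DoubleBuffer<float> d_keys(d_keys_a, d_keys_b);
  cub::DoubleBuffer<int>   d_vals(d_vals_a, d_vals_b);

  void  *d_temp = NULL;
  size_t temp_bytes = 0;
  // Phase 1: query the required temporary storage size.
  cub::DeviceRadixSort::SortPairsDescending(d_temp, temp_bytes,
                                            d_keys, d_vals, num_items);
  cudaMalloc(&d_temp, temp_bytes);
  // Phase 2: run the actual descending key/value sort.
  cub::DeviceRadixSort::SortPairsDescending(d_temp, temp_bytes,
                                            d_keys, d_vals, num_items);
  cudaDeviceSynchronize();
  // d_keys.Current() / d_vals.Current() now point at the sorted buffers.
  cudaFree(d_temp);
}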
1bcca8d5a473742abfb24fe1e4ef1c04d6dab75c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file dnn/src/cuda/images2neibs/kernel.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "src/cuda/images2neibs/kernel.cuh" #include <cstdio> #include "megdnn/dtype.h" #include "src/cuda/utils.cuh" namespace megdnn { namespace cuda { namespace images2neibs { #define grid_y_max 512 template <typename T> __global__ void forward_kernel( const T* src, T* dst, int N, int C, int IH, int IW, int OH, int OW, int ph, int pw, int sh, int sw, int dh, int dw, int WH, int WW) { int NC = N * C; int WP = WH * WW; for (int wp = threadIdx.x; wp < WP; wp += blockDim.x) { int nc = blockIdx.y; while (nc < NC) { int wh = wp / WW; int ww = wp % WW; int op = threadIdx.y + blockIdx.x * blockDim.y; if (op < OH * OW) { int oh = op / OW; int ow = op % OW; int ih = -ph + sh * oh + wh * dh; int iw = -pw + sw * ow + ww * dw; int dst_pos = nc * OH * OW * WH * WW + op * WH * WW + wp; int src_pos = nc * IH * IW + ih * IW + iw; dst[dst_pos] = (ih >= 0 && ih < IH && iw >= 0 && iw < IW) ? src[src_pos] : 0.0f; } nc += grid_y_max; } } } template <typename T> void forward( const T* src, T* dst, int N, int C, int IH, int IW, int OH, int OW, int ph, int pw, int sh, int sw, int dh, int dw, int wh, int ww, hipStream_t stream) { int spatial_size = OH * OW; int kernel_size = wh * ww; int tx = min(NR_THREADS, kernel_size); int ty = NR_THREADS / tx; megdnn_assert(ty > 0); int bx = DIVUP(spatial_size, ty); int by = N * C; hipLaunchKernelGGL(( forward_kernel), dim3(dim3(bx, ::min(grid_y_max, by))), dim3(dim3(tx, ty)), 0, stream, src, dst, N, C, IH, IW, OH, OW, ph, pw, sh, sw, dh, dw, wh, ww); after_kernel_launch(); } #undef grid_y_max template <typename T> __global__ void backward_kernel( const T* diff, T* grad, int N, int C, int IH, int IW, int OH, int OW, int ph, int pw, int sh, int sw, int dh, int dw, int WH, int WW) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < N * C * IH * IW) { int nc = id / (IH * IW); int ih = id % (IH * IW) / IW; int iw = id % (IH * IW) % IW; grad[nc * IH * IW + ih * IW + iw] = 0.0f; int oh_max = min((ih + ph) / sh, OH - 1); int oh_min = max((ih + ph - (WH - 1) * dh + sh - 1) / sh, 0); int ow_max = min((iw + pw) / sw, OW - 1); int ow_min = max((iw + pw - (WW - 1) * dw + sw - 1) / sw, 0); for (int oh = oh_min; oh <= oh_max; ++oh) for (int ow = ow_min; ow <= ow_max; ++ow) { if ((ih + ph - sh * oh) % dh == 0 && (iw + pw - sw * ow) % dw == 0) { int wh = ih + ph - sh * oh - (ih + ph - sh * oh) / dh * (dh - 1); int ww = iw + pw - sw * ow - (iw + pw - sw * ow) / dw * (dw - 1); grad[nc * IH * IW + ih * IW + iw] += diff[nc * OH * OW * WH * WW + oh * OW * WH * WW + ow * WH * WW + wh * WW + ww]; } } } } template <typename T> void backward( const T* diff, T* grad, int N, int C, int IH, int IW, int OH, int OW, int ph, int pw, int sh, int sw, int dh, int dw, int wh, int ww, hipStream_t stream) { int threads = NR_THREADS; int blocks = DIVUP(N * C * IH * IW, threads); hipLaunchKernelGGL(( backward_kernel), dim3(blocks), dim3(threads), 0, stream, diff, grad, N, C, IH, IW, OH, OW, ph, pw, sh, sw, dh, dw, wh, ww); after_kernel_launch(); } #define INST(T) \ template void forward<T>( \ const T*, T*, int, int, int, 
int, int, int, int, int, int, int, int, int, \ int, int, hipStream_t); \ template void backward<T>( \ const T*, T*, int, int, int, int, int, int, int, int, int, int, int, int, \ int, int, hipStream_t); #define cb(DType) INST(DTypeTrait<DType>::ctype) MEGDNN_FOREACH_COMPUTING_DTYPE(cb) } // namespace images2neibs } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
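// A minimal CPU reference sketch of forward_kernel above: for each output
// position (oh, ow) and window offset (wh, ww), the source pixel is read from
// ih = -ph + sh*oh + dh*wh, iw = -pw + sw*ow + dw*ww, and anything outside the
// image contributes zero. images2neibs_forward_ref is an illustrative name,
// not part of MegEngine.
static void images2neibs_forward_ref(const float *src, float *dst,
                                     int NC, int IH, int IW, int OH, int OW,
                                     int ph, int pw, int sh, int sw,
                                     int dh, int dw, int WH, int WW)
{
  for (int nc = 0; nc < NC; ++nc)
    for (int oh = 0; oh < OH; ++oh)
      for (int ow = 0; ow < OW; ++ow)
        for (int wh = 0; wh < WH; ++wh)
          for (int ww = 0; ww < WW; ++ww) {
            int ih = -ph + sh * oh + wh * dh;
            int iw = -pw + sw * ow + ww * dw;
            int op = oh * OW + ow;               // flattened output position
            int wp = wh * WW + ww;               // flattened window offset
            int dst_pos = (nc * OH * OW + op) * WH * WW + wp;
            dst[dst_pos] = (ih >= 0 && ih < IH && iw >= 0 && iw < IW)
                               ? src[(nc * IH + ih) * IW + iw]
                               : 0.0f;
          }
}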
1bcca8d5a473742abfb24fe1e4ef1c04d6dab75c.cu
/** * \file dnn/src/cuda/images2neibs/kernel.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2021 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "src/cuda/images2neibs/kernel.cuh" #include <cstdio> #include "megdnn/dtype.h" #include "src/cuda/utils.cuh" namespace megdnn { namespace cuda { namespace images2neibs { #define grid_y_max 512 template <typename T> __global__ void forward_kernel( const T* src, T* dst, int N, int C, int IH, int IW, int OH, int OW, int ph, int pw, int sh, int sw, int dh, int dw, int WH, int WW) { int NC = N * C; int WP = WH * WW; for (int wp = threadIdx.x; wp < WP; wp += blockDim.x) { int nc = blockIdx.y; while (nc < NC) { int wh = wp / WW; int ww = wp % WW; int op = threadIdx.y + blockIdx.x * blockDim.y; if (op < OH * OW) { int oh = op / OW; int ow = op % OW; int ih = -ph + sh * oh + wh * dh; int iw = -pw + sw * ow + ww * dw; int dst_pos = nc * OH * OW * WH * WW + op * WH * WW + wp; int src_pos = nc * IH * IW + ih * IW + iw; dst[dst_pos] = (ih >= 0 && ih < IH && iw >= 0 && iw < IW) ? src[src_pos] : 0.0f; } nc += grid_y_max; } } } template <typename T> void forward( const T* src, T* dst, int N, int C, int IH, int IW, int OH, int OW, int ph, int pw, int sh, int sw, int dh, int dw, int wh, int ww, cudaStream_t stream) { int spatial_size = OH * OW; int kernel_size = wh * ww; int tx = min(NR_THREADS, kernel_size); int ty = NR_THREADS / tx; megdnn_assert(ty > 0); int bx = DIVUP(spatial_size, ty); int by = N * C; forward_kernel<<<dim3(bx, std::min(grid_y_max, by)), dim3(tx, ty), 0, stream>>>( src, dst, N, C, IH, IW, OH, OW, ph, pw, sh, sw, dh, dw, wh, ww); after_kernel_launch(); } #undef grid_y_max template <typename T> __global__ void backward_kernel( const T* diff, T* grad, int N, int C, int IH, int IW, int OH, int OW, int ph, int pw, int sh, int sw, int dh, int dw, int WH, int WW) { int id = threadIdx.x + blockIdx.x * blockDim.x; if (id < N * C * IH * IW) { int nc = id / (IH * IW); int ih = id % (IH * IW) / IW; int iw = id % (IH * IW) % IW; grad[nc * IH * IW + ih * IW + iw] = 0.0f; int oh_max = min((ih + ph) / sh, OH - 1); int oh_min = max((ih + ph - (WH - 1) * dh + sh - 1) / sh, 0); int ow_max = min((iw + pw) / sw, OW - 1); int ow_min = max((iw + pw - (WW - 1) * dw + sw - 1) / sw, 0); for (int oh = oh_min; oh <= oh_max; ++oh) for (int ow = ow_min; ow <= ow_max; ++ow) { if ((ih + ph - sh * oh) % dh == 0 && (iw + pw - sw * ow) % dw == 0) { int wh = ih + ph - sh * oh - (ih + ph - sh * oh) / dh * (dh - 1); int ww = iw + pw - sw * ow - (iw + pw - sw * ow) / dw * (dw - 1); grad[nc * IH * IW + ih * IW + iw] += diff[nc * OH * OW * WH * WW + oh * OW * WH * WW + ow * WH * WW + wh * WW + ww]; } } } } template <typename T> void backward( const T* diff, T* grad, int N, int C, int IH, int IW, int OH, int OW, int ph, int pw, int sh, int sw, int dh, int dw, int wh, int ww, cudaStream_t stream) { int threads = NR_THREADS; int blocks = DIVUP(N * C * IH * IW, threads); backward_kernel<<<blocks, threads, 0, stream>>>( diff, grad, N, C, IH, IW, OH, OW, ph, pw, sh, sw, dh, dw, wh, ww); after_kernel_launch(); } #define INST(T) \ template void forward<T>( \ const T*, T*, int, int, int, int, int, int, int, int, int, int, int, int, \ int, int, cudaStream_t); \ template void backward<T>( \ const T*, T*, int, int, int, int, int, 
int, int, int, int, int, int, int, \ int, int, cudaStream_t); #define cb(DType) INST(DTypeTrait<DType>::ctype) MEGDNN_FOREACH_COMPUTING_DTYPE(cb) } // namespace images2neibs } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
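// A standalone sketch of the capped-grid pattern used by forward() above:
// gridDim.y is clamped to grid_y_max (512) and forward_kernel keeps striding
// over the remaining N*C slices ("nc += grid_y_max"), so the grid stays
// bounded no matter how large the batch is. This is the generic grid-stride
// form of that idea; slice_kernel and launch_slice_kernel are illustrative
// names, not MegEngine code, and num_slices is assumed to be > 0.
#include <cuda_runtime.h>

__global__ void slice_kernel(float *data, int num_slices, int slice_len)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= slice_len) return;
  // Stride over slices with whatever gridDim.y the host chose (<= 512 here).
  for (int s = blockIdx.y; s < num_slices; s += gridDim.y) {
    data[s * slice_len + i] += 1.0f;  // placeholder per-element work
  }
}

void launch_slice_kernel(float *data, int num_slices, int slice_len,
                         cudaStream_t stream)
{
  const int threads = 128;
  int grid_y = num_slices < 512 ? num_slices : 512;  // cap, like grid_y_max
  dim3 grid((slice_len + threads - 1) / threads, grid_y);
  slice_kernel<<<grid, dim3(threads, 1), 0, stream>>>(data, num_slices,
                                                      slice_len);
}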
424e4854900a93f62a3fca5dee4db534a1cf9feb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>

__global__ void kernel (float *out)
{
  // shared memory
  // the size is determined by the host application
  extern __shared__ float sdata[];

  // access thread id
  const unsigned int tid = threadIdx.x;
  // access number of threads in this block
  const unsigned int num_threads = blockDim.x;

  sdata[tid] = 0.0;
  __syncthreads();

  //extern __shared__ float device_memory[];

  // Compute the index variable
  int idx = blockIdx.x*blockDim.x + threadIdx.x;

  //device_memory[threadIdx.x] += blockDim.x;
  sdata[tid] += blockDim.x;
  //device_memory[threadIdx.x] += threadIdx.x;

  // Do I need this to get the output?
  // Maybe not
  //__syncthreads();

  //out[threadIdx.x] = blockIdx.x;
  out[tid] = sdata[tid];
}

int main()
{
  int nbins = 16;
  int dimx = 16;
  int num_bytes = dimx*sizeof(float);

  float *d_a=0, *h_a=0; // device and host pointers

  // Allocate memory on host (CPU)
  h_a = (float*)malloc(num_bytes);
  // Allocate memory on device (GPU)
  hipMalloc((void**)&d_a,num_bytes);

  // Check to see that there was enough memory for both
  // allocations.
  // If the memory allocation fails, it doesn't change the
  // pointer value. That is why we set them to be 0 at declaration,
  // and then see if they have changed or stayed the same.
  if (0==h_a || 0==d_a)
  {
    printf("couldn't allocate memory\n");
    return 1;
  }

  // Initialize array to all 0's
  hipMemset(d_a,0,num_bytes);

  //-----------------------------------------------------------------------//
  // Some explanatory code
  /*
  // This will give us 256 thread blocks, arranged in a 16x16 grid.
  dim3 grid(16,16);
  // This will give us 256 threads/block, arranged in a 16x16 grid.
  dim3 block(16,16);
  hipLaunchKernelGGL(( kernel), dim3(grid),dim3(block),0,0, XXX);
  // This is a shortcut for launching some thread blocks.
  // It launches a grid of 32 thread blocks arranged in a 1x32 grid
  // and 512 threads per block, arranged in a 1x512 array.
  kernel<<<32,512>>>(YYY);
  */

  //dim3 grid,block;
  //block.x = 8;
  //grid.x = dimx/block.x;
  //kernel<<<grid,block>>>(d_a);
  //kernel<<<4,16>>>(d_a);

  dim3 grid(16,16);
  dim3 block(16,16);
  hipLaunchKernelGGL(( kernel), dim3(grid),dim3(block),0,0, d_a);
  hipDeviceSynchronize();

  // Copy it back over
  hipMemcpy(h_a,d_a,num_bytes,hipMemcpyDeviceToHost);

  for (int i=0;i<dimx;i++)
  {
    printf("%f ",h_a[i]);
  }
  printf("\n");

  free(h_a);
  hipFree(d_a);

  return 0;
}
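// The demo above declares "extern __shared__ float sdata[]" but the launch
// passes 0 bytes of dynamic shared memory, so sdata has no backing storage.
// A minimal sketch of the intended pattern, passing the byte count as the
// third launch parameter; shmem_kernel and run_shmem_demo are illustrative
// names and this is not part of the original file pair.
#include <cuda_runtime.h>
#include <stdio.h>

__global__ void shmem_kernel(float *out)
{
  extern __shared__ float sdata[];        // sized by the launch configuration
  const unsigned int tid = threadIdx.x;
  sdata[tid] = 0.0f;
  __syncthreads();
  sdata[tid] += blockDim.x;
  out[tid] = sdata[tid];
}

int run_shmem_demo(void)
{
  const int dimx = 16;
  const size_t num_bytes = dimx * sizeof(float);
  float h_a[16];
  float *d_a = NULL;
  if (cudaMalloc((void**)&d_a, num_bytes) != cudaSuccess) return 1;
  cudaMemset(d_a, 0, num_bytes);
  // Third launch parameter = dynamic shared-memory bytes per block.
  shmem_kernel<<<1, dimx, num_bytes>>>(d_a);
  cudaDeviceSynchronize();
  cudaMemcpy(h_a, d_a, num_bytes, cudaMemcpyDeviceToHost);
  for (int i = 0; i < dimx; i++) printf("%f ", h_a[i]);
  printf("\n");
  cudaFree(d_a);
  return 0;
}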
424e4854900a93f62a3fca5dee4db534a1cf9feb.cu
#include<stdio.h> __global__ void kernel (float *out) { // shared memory // the size is determined by the host application extern __shared__ float sdata[]; // access thread id const unsigned int tid = threadIdx.x; // access number of threads in this block const unsigned int num_threads = blockDim.x; sdata[tid] = 0.0; __syncthreads(); //extern __shared__ float device_memory[]; // Compute the index variable int idx = blockIdx.x*blockDim.x + threadIdx.x; //device_memory[threadIdx.x] += blockDim.x; sdata[tid] += blockDim.x; //device_memory[threadIdx.x] += threadIdx.x; // Do I need this to get the output? // Maybe not //__syncthreads(); //out[threadIdx.x] = blockIdx.x; out[tid] = sdata[tid]; } int main() { int nbins = 16; int dimx = 16; int num_bytes = dimx*sizeof(float); float *d_a=0, *h_a=0; // device and host pointers // Allocate memory on host (CPU) h_a = (float*)malloc(num_bytes); // Allocate memory on device (GPU) cudaMalloc((void**)&d_a,num_bytes); // Check to see that there was enough memory for both // allocations. // If the memory allocation fails, it doesn't change the // pointer value. That is why we set them to be 0 at declaration, // and then see if they have changed or stayed the same. if (0==h_a || 0==d_a) { printf("couldn't allocate memory\n"); return 1; } // Initialize array to all 0's cudaMemset(d_a,0,num_bytes); //-----------------------------------------------------------------------// // Some explanatory code /* // This will give us 256 thread blocks, arranged in a 16x16 grid. dim3 grid(16,16); // This will give us 256 threads/block, arranged in a 16x16 grid. dim3 block(16,16); kernel<<<grid,block,0,0>>>(XXX); // This is a shortcut for launching some thread blocks. // It launches a grid of 32 thread blocks arranged in a 1x32 grid // and 512 threads per block, arranged in a 1x512 array. kernel<<<32,512>>>(YYY); */ //dim3 grid,block; //block.x = 8; //grid.x = dimx/block.x; //kernel<<<grid,block>>>(d_a); //kernel<<<4,16>>>(d_a); dim3 grid(16,16); dim3 block(16,16); kernel<<<grid,block,0,0>>>(d_a); cudaThreadSynchronize(); // Copy it back over cudaMemcpy(h_a,d_a,num_bytes,cudaMemcpyDeviceToHost); for (int i=0;i<dimx;i++) { printf("%f ",h_a[i]); } printf("\n"); free(h_a); cudaFree(d_a); return 0; }
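// One difference worth noting between this .cu and its hipified counterpart
// above: the .cu still calls the deprecated cudaThreadSynchronize(), while
// the HIP version already uses the device-wide hipDeviceSynchronize(). A
// minimal sketch of the modern CUDA equivalent with basic error checking;
// sync_and_check is an illustrative helper, not part of the file.
#include <cuda_runtime.h>
#include <stdio.h>

static int sync_and_check(const char *what)
{
  cudaError_t err = cudaGetLastError();        // catch launch-time errors
  if (err == cudaSuccess)
    err = cudaDeviceSynchronize();             // catch execution-time errors
  if (err != cudaSuccess) {
    fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
    return 1;
  }
  return 0;
}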
871b047215a5822a3001dd3cf918dcf80cf83447.hip
// !!! This is a file automatically generated by hipify!!! /* 1-bit BMMA code. Runs at 500TOPS for matrix size of 4096x4096x8192. Borrows largely from CUDA-SDK. By Boyuan */ #include <assert.h> #include <hip/hip_runtime.h> #include <mma.h> #include <stdio.h> #include <helper_cuda.h> #include <helper_functions.h> #define CHUNK_K 4 #define SKEW 1 #define WARPS_PER_BLOCK 8 #define WARP_SIZE 32 #define THREADS_PER_BLOCK WARP_SIZE * WARPS_PER_BLOCK #define CHUNK_LINE_BYTES CHUNK_K * sizeof(int4) #define WARP_COPY_BYTES WARP_SIZE * sizeof(int4) #define CHUNK_COPY_LINES_PER_WARP WARP_COPY_BYTES / CHUNK_LINE_BYTES #define CHUNK_COPY_LINE_LANES WARP_SIZE / CHUNK_COPY_LINES_PER_WARP #define BLOCK_ROW_WARPS 2 #define BLOCK_COL_WARPS 4 #define WARP_ROW_TILES 4 #define WARP_COL_TILES 2 #define BLOCK_ROW_TILES WARP_ROW_TILES * BLOCK_ROW_WARPS #define BLOCK_COL_TILES WARP_COL_TILES * BLOCK_COL_WARPS #define M 8 #define N 8 #define K 128 #define checkKernelErrors(expr) \ do { \ expr; \ \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \ hipGetErrorString(__err)); \ abort(); \ } \ } while (0) using namespace nvcuda; using namespace nvcuda::wmma::experimental; // Assume that Kernel size is 3x3. // Assume CIN is 128. __global__ void compute_conv_imma(const int4 *W, const int4 *X, int *Output, int Height, int Width, int CIN, int COUT) { // GEMM Configuration int X_bit_offset = Height * Width * CIN/128; extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here. // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_i = (block_pos/(COUT/64)) / (Width/8) * 4; const unsigned int block_j = (block_pos/(COUT/64)) % (Width/8) * 8; const unsigned int block_z = block_pos % (COUT/64) * 64; if (block_i >= Height) { break; } int image_starting_idx = block_i * Width * CIN/128 + block_j * CIN/128; wmma::fragment<wmma::accumulator, 8, 8, 128, int> c[WARP_COL_TILES] [WARP_ROW_TILES]; for(int i=0; i < WARP_COL_TILES; i++) for(int j = 0; j < WARP_ROW_TILES; j++) wmma::fill_fragment(c[i][j], 0); if (threadIdx.x < 120) { int threadPart = threadIdx.x/60; int threadOffset = threadIdx.x%60; int GL_idx = threadPart * X_bit_offset + (threadOffset/10)*Width + threadOffset%10 + image_starting_idx; *(&shmem[128][0]+threadIdx.x) = X[GL_idx]; } __syncthreads(); // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < int(9*CIN/128/4); tile_k += CHUNK_K) { int SHMEM_i = threadIdx.x/4; int SHMEM_part = SHMEM_i / 32; int SHMEM_offset = SHMEM_i % 32; int feature_expand_idx = SHMEM_part * 15 * CIN/2 + (SHMEM_offset/8)*10*CIN/128 + (SHMEM_offset%8)*CIN/128; int t = threadIdx.x % 4; int thread_expand_idx = feature_expand_idx + (tile_k*4+t)/(3*CIN/128)*10*(CIN/128) + (tile_k*4+t)%(3*CIN/128); shmem[SHMEM_i][t] = *(&shmem[128][0]+thread_expand_idx); SHMEM_i += 64; int weight_load_idx = SHMEM_part * 9 * CIN * COUT / 128 + (block_z + SHMEM_offset) * 9 * CIN/128; int thread_load_idx = weight_load_idx + (tile_k*4 + t) * CIN/128; shmem[SHMEM_i][t] = W[thread_load_idx]; __syncthreads(); // Compute a grid of C matrix tiles in each warp. 
#pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 4 + (i * M); const int4 *tile_ptr = &shmem[shmem_idx_a][k_step]; wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128); #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = 64 + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)]; wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128); } // printf("ckpt4\n"); wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j]); } } } __syncthreads(); } // Needs special handle for the remaining K. // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { int *tile_ptr = (int*)&shmem[0][0] + warpId*8*64 + (i*4+j) * 64; wmma::store_matrix_sync(tile_ptr, c[i][j], 8, wmma::mem_row_major); } } __syncthreads(); if (threadIdx.x < 32) { int num1 = 0; int num2 = 0; for (int j = 0; j < 32; j++) { int tile_i = threadIdx.x%16/8; int element_i = (threadIdx.x%16)%8; int tile_j = j%32/8; int element_j = (j%32)%8; int final_i = warpId * 8 + tile_i*4+tile_j; int final_j = element_i *8 + element_j; int v0 = *((int*)&shmem[0][0]+final_i*64+final_j); int v1 = *((int*)&shmem[0][0]+final_i*64+final_j+32); int v2 = *((int*)&shmem[0][0]+(final_i+32)*64+final_j); int v3 = *((int*)&shmem[0][0]+(final_i+32)*64+final_j+32); int tmp = v0 + 2*v1 + 2*v2 + 4*v3; int tmp1 = tmp&1; int tmp2 = tmp&2; num1 = (num1 << 1) | tmp1; num2 = (num2 << 1) | tmp2; } *(Output+(threadIdx.x/8)*Width + threadIdx.x%8) = num1; *(Output+(threadIdx.x/8)*Width + threadIdx.x%8+ Height*Width*COUT/32) = num2; } __syncthreads(); } } // void init_matrices(int4 *A, int4 *B){ // int *A_int = (int*) A; // int *B_int = (int*) B; // for(int i = 0; i < M_GLOBAL; i++) { // for(int j = 0; j < K_GLOBAL/32; j++) { // A_int[i*K_GLOBAL/32+j] = rand(); // } // } // for(int i = 0; i < N_GLOBAL; i++) { // for(int j = 0; j < K_GLOBAL/32; j++) { // B_int[i*K_GLOBAL/32+j] = 0xFFFFFFFF; // B_int[i*K_GLOBAL/32+j] = rand(); // } // } // } // int popcnt(int i) { // // Java: use int, and use >>> instead of >> // // C or C++: use int // i = i - ((i >> 1) & 0x55555555); // i = (i & 0x33333333) + ((i >> 2) & 0x33333333); // return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; // } // void compute_ref(int4 *A, int4 *B, int *ref_C) { // int *A_int = (int*) A; // int *B_int = (int*) B; // for (int m = 0; m < M_GLOBAL; m++) { // for (int n = 0; n < N_GLOBAL; n++) { // int tmp = 0; // for (int k = 0; k < K_GLOBAL; k += 32) { // // bit vector from row A and column B, accumulation and addition. 
// tmp += popcnt(A_int[(m*K_GLOBAL + k)/32] ^ B_int[(n*K_GLOBAL + k)/32]); // } // // ref_C[m * K + n]= K - 2 * tmp; // ref_C[m * N_GLOBAL + n]= tmp; // } // } // } // void validate_results(int *C, int* ref_C, int M_, int N_) { // printf("Checking computed result for correctness: "); // bool correct = true; // double eps = 1.e-6; // machine zero // for(int i = 0; i < M_; i++) { // for(int j = 0; j < N_; j++) { // int idx = i*N_+j; // double dst = fabs(C[idx] - ref_C[idx]); // double abs = fabs(C[idx]) * fabs(ref_C[idx]); // double ref_err = dst / abs; // if (ref_err > eps) { // // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); // printf("i: %d, j: %d, C: %d, ref_C: %d\n", i, j, C[idx], ref_C[idx]); // // printf("non equal\n"); // correct = false; // } // } // } // printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); // } // #define verify_output int main(int argc, char **argv) { printf("Initializing...\n"); int dev = findCudaDevice(argc, (const char **)argv); hipDeviceProp_t deviceProp; checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev)); int Height = 256; int Width = 32; int CIN = 128; int COUT = 128; int bit = 2; int4 *X = NULL; int4 *W = NULL; int *Output = NULL; checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * Height * Width * (CIN/128) * bit)); checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * 9 * (CIN/128) * COUT * bit)); checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&Output), sizeof(int4) * Height * Width * (COUT/128) * bit)); // #ifdef verify_output // printf("Preparing validation data for GPU...\n"); // int4 *W_h = NULL; // int4 *X_h = NULL; // int *Output_h = NULL; // X_h = (int4 *)malloc(sizeof(int4) * H * W * (CIN/128) * X_bit); // W_h = (int4 *)malloc(sizeof(int4) * 9 * (CIN/128) * COUT * W_bit); // Output_h = (int *)malloc(sizeof(int4) * H * W * (COUT/128) * X_bit); // init_matrices(A_h, B_h); // checkCudaErrors(hipMemcpy(A, A_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128), hipMemcpyHostToDevice)); // checkCudaErrors(hipMemcpy(B, B_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128), hipMemcpyHostToDevice)); // #endif int SHMEM_SZ = 65536; checkCudaErrors(hipFuncSetAttribute( compute_conv_imma, hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); // Run ours NUM_PROFILES times and record time. 
float bmma_ms_avg = 0.0f; for(int iter=0; iter<200; ++iter){ float bmma_ms = 0.0f; hipEvent_t bmma_start; hipEvent_t bmma_end; hipEventCreate(&bmma_start); hipEventCreate(&bmma_end); hipEventRecord(bmma_start); checkKernelErrors( hipLaunchKernelGGL(( (compute_conv_imma), dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK), SHMEM_SZ, 0, W, X, Output, Height, Width, CIN, COUT))); hipEventRecord(bmma_end); hipEventSynchronize(bmma_end); hipEventElapsedTime(&bmma_ms, bmma_start, bmma_end); hipEventDestroy(bmma_start); hipEventDestroy(bmma_end); bmma_ms_avg += bmma_ms; } bmma_ms_avg = bmma_ms_avg/200.0f; printf("Time: %f ms\n", bmma_ms_avg); printf("TOPS: %.2f\n", (((double)9 * CIN * Height * Width * COUT * 2)/(bmma_ms_avg/1000.)) / 1e12); // #ifdef verify_output // printf("Validating results...\n"); // checkCudaErrors(hipMemcpy(C_h, C, sizeof(int) * M_GLOBAL * N_GLOBAL, hipMemcpyDeviceToHost)); // int *C_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL); // /* Copmpute reference matrix on CPU */ // // compute_ref(A_h, B_h, C_ref); // /* validation results */ // // validate_results(C_h, C_ref, M_GLOBAL, N_GLOBAL); // #endif // free(A_h); // free(B_h); // free(C_h); // checkCudaErrors(hipFree(reinterpret_cast<void *>(A))); // checkCudaErrors(hipFree(reinterpret_cast<void *>(B))); // checkCudaErrors(hipFree(reinterpret_cast<void *>(C))); return EXIT_SUCCESS; }
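// A small host-side sketch of the arithmetic behind
// "int tmp = v0 + 2*v1 + 2*v2 + 4*v3;" in the epilogue of compute_conv_imma
// above. With 2-bit operands split into bit planes, x = x0 + 2*x1 and
// w = w0 + 2*w1, the four plane-wise accumulator tiles recombine as
//   x*w = x0*w0 + 2*(x0*w1 + x1*w0) + 4*(x1*w1),
// which matches the 1/2/2/4 weighting of the four 64x64 quadrants; which
// quadrant holds which bit-plane pair follows from the shared-memory layout
// in the kernel. combine_bit_planes is an illustrative helper name only.
static int combine_bit_planes(int acc_00, int acc_01, int acc_10, int acc_11)
{
  // acc_XY: accumulator of activation bit plane X against weight bit plane Y.
  return acc_00 + 2 * (acc_01 + acc_10) + 4 * acc_11;
}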
871b047215a5822a3001dd3cf918dcf80cf83447.cu
/* 1-bit BMMA code. Runs at 500TOPS for matrix size of 4096x4096x8192. Borrows largely from CUDA-SDK. By Boyuan */ #include <assert.h> #include <cuda.h> #include <mma.h> #include <stdio.h> #include <helper_cuda.h> #include <helper_functions.h> #define CHUNK_K 4 #define SKEW 1 #define WARPS_PER_BLOCK 8 #define WARP_SIZE 32 #define THREADS_PER_BLOCK WARP_SIZE * WARPS_PER_BLOCK #define CHUNK_LINE_BYTES CHUNK_K * sizeof(int4) #define WARP_COPY_BYTES WARP_SIZE * sizeof(int4) #define CHUNK_COPY_LINES_PER_WARP WARP_COPY_BYTES / CHUNK_LINE_BYTES #define CHUNK_COPY_LINE_LANES WARP_SIZE / CHUNK_COPY_LINES_PER_WARP #define BLOCK_ROW_WARPS 2 #define BLOCK_COL_WARPS 4 #define WARP_ROW_TILES 4 #define WARP_COL_TILES 2 #define BLOCK_ROW_TILES WARP_ROW_TILES * BLOCK_ROW_WARPS #define BLOCK_COL_TILES WARP_COL_TILES * BLOCK_COL_WARPS #define M 8 #define N 8 #define K 128 #define checkKernelErrors(expr) \ do { \ expr; \ \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \ cudaGetErrorString(__err)); \ abort(); \ } \ } while (0) using namespace nvcuda; using namespace nvcuda::wmma::experimental; // Assume that Kernel size is 3x3. // Assume CIN is 128. __global__ void compute_conv_imma(const int4 *W, const int4 *X, int *Output, int Height, int Width, int CIN, int COUT) { // GEMM Configuration int X_bit_offset = Height * Width * CIN/128; extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here. // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_i = (block_pos/(COUT/64)) / (Width/8) * 4; const unsigned int block_j = (block_pos/(COUT/64)) % (Width/8) * 8; const unsigned int block_z = block_pos % (COUT/64) * 64; if (block_i >= Height) { break; } int image_starting_idx = block_i * Width * CIN/128 + block_j * CIN/128; wmma::fragment<wmma::accumulator, 8, 8, 128, int> c[WARP_COL_TILES] [WARP_ROW_TILES]; for(int i=0; i < WARP_COL_TILES; i++) for(int j = 0; j < WARP_ROW_TILES; j++) wmma::fill_fragment(c[i][j], 0); if (threadIdx.x < 120) { int threadPart = threadIdx.x/60; int threadOffset = threadIdx.x%60; int GL_idx = threadPart * X_bit_offset + (threadOffset/10)*Width + threadOffset%10 + image_starting_idx; *(&shmem[128][0]+threadIdx.x) = X[GL_idx]; } __syncthreads(); // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < int(9*CIN/128/4); tile_k += CHUNK_K) { int SHMEM_i = threadIdx.x/4; int SHMEM_part = SHMEM_i / 32; int SHMEM_offset = SHMEM_i % 32; int feature_expand_idx = SHMEM_part * 15 * CIN/2 + (SHMEM_offset/8)*10*CIN/128 + (SHMEM_offset%8)*CIN/128; int t = threadIdx.x % 4; int thread_expand_idx = feature_expand_idx + (tile_k*4+t)/(3*CIN/128)*10*(CIN/128) + (tile_k*4+t)%(3*CIN/128); shmem[SHMEM_i][t] = *(&shmem[128][0]+thread_expand_idx); SHMEM_i += 64; int weight_load_idx = SHMEM_part * 9 * CIN * COUT / 128 + (block_z + SHMEM_offset) * 9 * CIN/128; int thread_load_idx = weight_load_idx + (tile_k*4 + t) * CIN/128; shmem[SHMEM_i][t] = W[thread_load_idx]; __syncthreads(); // Compute a grid of C matrix tiles in each warp. 
#pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 4 + (i * M); const int4 *tile_ptr = &shmem[shmem_idx_a][k_step]; wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128); #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = 64 + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)]; wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128); } // printf("ckpt4\n"); wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j]); } } } __syncthreads(); } // Needs special handle for the remaining K. // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { int *tile_ptr = (int*)&shmem[0][0] + warpId*8*64 + (i*4+j) * 64; wmma::store_matrix_sync(tile_ptr, c[i][j], 8, wmma::mem_row_major); } } __syncthreads(); if (threadIdx.x < 32) { int num1 = 0; int num2 = 0; for (int j = 0; j < 32; j++) { int tile_i = threadIdx.x%16/8; int element_i = (threadIdx.x%16)%8; int tile_j = j%32/8; int element_j = (j%32)%8; int final_i = warpId * 8 + tile_i*4+tile_j; int final_j = element_i *8 + element_j; int v0 = *((int*)&shmem[0][0]+final_i*64+final_j); int v1 = *((int*)&shmem[0][0]+final_i*64+final_j+32); int v2 = *((int*)&shmem[0][0]+(final_i+32)*64+final_j); int v3 = *((int*)&shmem[0][0]+(final_i+32)*64+final_j+32); int tmp = v0 + 2*v1 + 2*v2 + 4*v3; int tmp1 = tmp&1; int tmp2 = tmp&2; num1 = (num1 << 1) | tmp1; num2 = (num2 << 1) | tmp2; } *(Output+(threadIdx.x/8)*Width + threadIdx.x%8) = num1; *(Output+(threadIdx.x/8)*Width + threadIdx.x%8+ Height*Width*COUT/32) = num2; } __syncthreads(); } } // void init_matrices(int4 *A, int4 *B){ // int *A_int = (int*) A; // int *B_int = (int*) B; // for(int i = 0; i < M_GLOBAL; i++) { // for(int j = 0; j < K_GLOBAL/32; j++) { // A_int[i*K_GLOBAL/32+j] = rand(); // } // } // for(int i = 0; i < N_GLOBAL; i++) { // for(int j = 0; j < K_GLOBAL/32; j++) { // B_int[i*K_GLOBAL/32+j] = 0xFFFFFFFF; // B_int[i*K_GLOBAL/32+j] = rand(); // } // } // } // int popcnt(int i) { // // Java: use int, and use >>> instead of >> // // C or C++: use int // i = i - ((i >> 1) & 0x55555555); // i = (i & 0x33333333) + ((i >> 2) & 0x33333333); // return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; // } // void compute_ref(int4 *A, int4 *B, int *ref_C) { // int *A_int = (int*) A; // int *B_int = (int*) B; // for (int m = 0; m < M_GLOBAL; m++) { // for (int n = 0; n < N_GLOBAL; n++) { // int tmp = 0; // for (int k = 0; k < K_GLOBAL; k += 32) { // // bit vector from row A and column B, accumulation and addition. 
// tmp += popcnt(A_int[(m*K_GLOBAL + k)/32] ^ B_int[(n*K_GLOBAL + k)/32]); // } // // ref_C[m * K + n]= K - 2 * tmp; // ref_C[m * N_GLOBAL + n]= tmp; // } // } // } // void validate_results(int *C, int* ref_C, int M_, int N_) { // printf("Checking computed result for correctness: "); // bool correct = true; // double eps = 1.e-6; // machine zero // for(int i = 0; i < M_; i++) { // for(int j = 0; j < N_; j++) { // int idx = i*N_+j; // double dst = fabs(C[idx] - ref_C[idx]); // double abs = fabs(C[idx]) * fabs(ref_C[idx]); // double ref_err = dst / abs; // if (ref_err > eps) { // // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); // printf("i: %d, j: %d, C: %d, ref_C: %d\n", i, j, C[idx], ref_C[idx]); // // printf("non equal\n"); // correct = false; // } // } // } // printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); // } // #define verify_output int main(int argc, char **argv) { printf("Initializing...\n"); int dev = findCudaDevice(argc, (const char **)argv); cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev)); int Height = 256; int Width = 32; int CIN = 128; int COUT = 128; int bit = 2; int4 *X = NULL; int4 *W = NULL; int *Output = NULL; checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * Height * Width * (CIN/128) * bit)); checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * 9 * (CIN/128) * COUT * bit)); checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&Output), sizeof(int4) * Height * Width * (COUT/128) * bit)); // #ifdef verify_output // printf("Preparing validation data for GPU...\n"); // int4 *W_h = NULL; // int4 *X_h = NULL; // int *Output_h = NULL; // X_h = (int4 *)malloc(sizeof(int4) * H * W * (CIN/128) * X_bit); // W_h = (int4 *)malloc(sizeof(int4) * 9 * (CIN/128) * COUT * W_bit); // Output_h = (int *)malloc(sizeof(int4) * H * W * (COUT/128) * X_bit); // init_matrices(A_h, B_h); // checkCudaErrors(cudaMemcpy(A, A_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128), cudaMemcpyHostToDevice)); // checkCudaErrors(cudaMemcpy(B, B_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128), cudaMemcpyHostToDevice)); // #endif int SHMEM_SZ = 65536; checkCudaErrors(cudaFuncSetAttribute( compute_conv_imma, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); // Run ours NUM_PROFILES times and record time. 
float bmma_ms_avg = 0.0f; for(int iter=0; iter<200; ++iter){ float bmma_ms = 0.0f; cudaEvent_t bmma_start; cudaEvent_t bmma_end; cudaEventCreate(&bmma_start); cudaEventCreate(&bmma_end); cudaEventRecord(bmma_start); checkKernelErrors( (compute_conv_imma<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK, SHMEM_SZ>>>(W, X, Output, Height, Width, CIN, COUT))); cudaEventRecord(bmma_end); cudaEventSynchronize(bmma_end); cudaEventElapsedTime(&bmma_ms, bmma_start, bmma_end); cudaEventDestroy(bmma_start); cudaEventDestroy(bmma_end); bmma_ms_avg += bmma_ms; } bmma_ms_avg = bmma_ms_avg/200.0f; printf("Time: %f ms\n", bmma_ms_avg); printf("TOPS: %.2f\n", (((double)9 * CIN * Height * Width * COUT * 2)/(bmma_ms_avg/1000.)) / 1e12); // #ifdef verify_output // printf("Validating results...\n"); // checkCudaErrors(cudaMemcpy(C_h, C, sizeof(int) * M_GLOBAL * N_GLOBAL, cudaMemcpyDeviceToHost)); // int *C_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL); // /* Copmpute reference matrix on CPU */ // // compute_ref(A_h, B_h, C_ref); // /* validation results */ // // validate_results(C_h, C_ref, M_GLOBAL, N_GLOBAL); // #endif // free(A_h); // free(B_h); // free(C_h); // checkCudaErrors(cudaFree(reinterpret_cast<void *>(A))); // checkCudaErrors(cudaFree(reinterpret_cast<void *>(B))); // checkCudaErrors(cudaFree(reinterpret_cast<void *>(C))); return EXIT_SUCCESS; }
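// The commented-out popcnt/compute_ref helpers in this file check a 1-bit GEMM on the CPU by
// XOR-ing packed 32-bit words and counting the set bits. The snippet below is a minimal,
// self-contained version of that idea; binary_dot, popcount32 and the sample operands are
// illustrative names and values, not taken from the file above.
#include <cstdint>
#include <cstdio>

static int popcount32(uint32_t v) {
    int c = 0;
    while (v) { v &= v - 1; ++c; }  // clear the lowest set bit until none remain
    return c;
}

// Count mismatching bit positions between two packed bit vectors of k_bits bits (k_bits % 32 == 0),
// mirroring the popcnt(A ^ B) accumulation in the commented-out reference.
static int binary_dot(const uint32_t* a, const uint32_t* b, int k_bits) {
    int mismatches = 0;
    for (int w = 0; w < k_bits / 32; ++w)
        mismatches += popcount32(a[w] ^ b[w]);
    return mismatches;
}

int main() {
    uint32_t a[2] = {0xF0F0F0F0u, 0x0u};
    uint32_t b[2] = {0xFF00FF00u, 0x0u};
    // 0xF0F0F0F0 ^ 0xFF00FF00 = 0x0FF00FF0, which has 16 set bits; the second word adds none.
    std::printf("mismatching bits: %d\n", binary_dot(a, b, 64));
    return 0;
}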
403a40f14f67e287c00661d7544b977801d5071c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "util.h" using namespace std; #define chunk 256 void clockStart(); void clockStop(const char * str); float sum_matrix(const float *M, int width); void print_matrix(const float *M, int width); void clean_matrix(float *M, int width); void init_matrix(float *M, int width); //Kernel // Suma por columnas de una matriz con un solo bloque __global__ void MatrixSumKernel_0(int M, float* A_dev, float* SumPar_dev){ // Pvalue es usado para el valor intermedio double Pvalue = 0; int offset = threadIdx.y * M; for (int k = 0; k < M; k++) { Pvalue = Pvalue + A_dev[offset+k]; } SumPar_dev[threadIdx.y] = Pvalue; } __global__ void MatrixSumKernel_1(int M, float* A_dev, float* SumPar_dev){ // Pvalue es usado para el valor intermedio float Pvalue = 0; int columna = blockIdx.x; int fCol = columna * gridDim.x; for (int k = 0; k < M; k++) { Pvalue = Pvalue + A_dev[fCol+k]; } SumPar_dev[blockIdx.x] = Pvalue; } __global__ void MatrixSumKernel_2(int M,float* A_dev, float* SumPar_dev){ float parcial = 0; int nCol = blockIdx.y; int fCol = nCol * gridDim.y; int pasos = M/ blockDim.x; int step = fCol + threadIdx.x*pasos; if(threadIdx.x<M){ for (int k = 0; k < pasos; ++k) { parcial = parcial + A_dev[step + k]; } atomicAdd(&(SumPar_dev[nCol]), parcial); } } __global__ void MatrixSumKernel_3(int M,float* A_dev, float* SumPar_dev){ int pasos; if(blockDim.x>M){ pasos = 1; }else{ pasos = M / blockDim.x; } float parcial = 0; int nCol = blockIdx.y; int fCol = nCol * gridDim.y; int step = fCol + threadIdx.x; if(threadIdx.x<M){ for (int k = 0; k < pasos; ++k) { parcial = parcial + A_dev[step + k*blockDim.x]; } atomicAdd(&(SumPar_dev[nCol]), parcial); } } __global__ void MatrixSumKernel_4(int M,float* A_dev, float* SumPar_dev){ // Cant Filas por bloque int rowXblock = M / gridDim.y; // Cant Columnas por bloque int colXblock = M / gridDim.x; //Cant Columnas que le conrresponden a un thread int colXthread = colXblock / blockDim.x ; float parcial; int nCol; int paso; for (int j = 1; j <= colXthread; ++j) { parcial = 0; nCol = blockIdx.x * colXblock + threadIdx.x * j; paso = nCol * M + rowXblock * blockIdx.y; for (int k = 0; k < rowXblock; ++k) { parcial = parcial + A_dev[paso + k]; } atomicAdd(&(SumPar_dev[nCol]), parcial); } } __global__ void MatrixSumKernel_5(int M,float* A_dev, float* SumPar_dev){ // Cant Filas por bloque int rowXblock = M / gridDim.y; // Cant Columnas por bloque int colXblock = M / gridDim.x; //Cant Filas que le conrresponden a un thread int rowXthread = rowXblock / blockDim.x ; float parcial; int nCol; int paso; for (int j = 0; j < colXblock; ++j) { parcial = 0; nCol = blockIdx.x * colXblock + j; paso = nCol * M + rowXblock * blockIdx.y; //For que suma la columna for (int k = 0; k < rowXthread; ++k) { int step = paso + threadIdx.x + k * blockDim.x; parcial = parcial + A_dev[step]; } atomicAdd(&(SumPar_dev[nCol]), parcial); } } __global__ void MatrixSumKernel_6(int M,float* A_dev, float* SumPar_dev){ extern __shared__ float Nds[]; int pasos; if(blockDim.x>M){ pasos = 1; }else{ pasos = M / blockDim.x; } float parcial = 0; int nCol = blockIdx.y; int fCol = nCol * gridDim.y; int step = fCol + threadIdx.x; if(threadIdx.x<M){ for (int k = 0; k < pasos; ++k) { parcial = parcial + A_dev[step + k*blockDim.x]; } }else{ parcial = 0; } Nds[threadIdx.x] = parcial; __syncthreads(); if (threadIdx.x == 0){ for (int i = 1; i < blockDim.x; ++i) { Nds[0] = Nds[0]+Nds[i]; } 
SumPar_dev[nCol] = Nds[0]; } } //extern "C" float sumaColMatriz(int M, int N, float * A_hst, int algoritmo){ size_t size = M * N * sizeof(float); size_t size2 = N*sizeof(float); float* A_dev, *SumPar_dev; float *SumPar_hst = (float *)malloc(N*sizeof(float)); // Allocate en device hipMalloc(&A_dev, size); hipMalloc(&SumPar_dev, size2); // Inicializo matrices en el device //clockStart(); hipMemcpy(A_dev, A_hst, size, hipMemcpyHostToDevice); hipMemset(SumPar_dev,0, size2); //clockStop("transf CPU -> GPU"); clockStart(); switch(algoritmo){ case 0:{ //Configurar la grilla dim3 tamGrid (1, 1); //Grid dimensión dim3 tamBlock(1, N); //Block dimensión hipLaunchKernelGGL(( MatrixSumKernel_0), dim3(tamGrid), dim3(tamBlock), 0, 0, M, A_dev, SumPar_dev); hipDeviceSynchronize(); clockStop("kernel 0"); break; }case 1:{ //Configurar la grilla dim3 tamGrid (N, 1); //Grid dimensión dim3 tamBlock(1, 1); //Block dimensión hipLaunchKernelGGL(( MatrixSumKernel_1), dim3(tamGrid), dim3(tamBlock), 0, 0, M, A_dev, SumPar_dev); hipDeviceSynchronize(); clockStop("kernel 1"); break; }case 2:{ // configuración de la ejecución int chunk2 = 32; //Configurar la grilla dim3 tamGrid (1, N); //Grid dimensión dim3 tamBlock(chunk2,1, 1); //Block dimensión hipLaunchKernelGGL(( MatrixSumKernel_2), dim3(tamGrid), dim3(tamBlock), 0, 0, M, A_dev, SumPar_dev); hipDeviceSynchronize(); clockStop("kernel 2"); break; }case 3:{ // configuración de la ejecución int chunk2 = 32; dim3 tamGrid(1, N); //Grid dimensión dim3 tamBlock(chunk2,1,1); //Block dimensión // lanzamiento del kernel hipLaunchKernelGGL(( MatrixSumKernel_3), dim3(tamGrid), dim3(tamBlock), 0, 0, M, A_dev, SumPar_dev); hipDeviceSynchronize(); clockStop("kernel 3"); break; }case 4:{ // configuración de la ejecución int chunk2 = 16; dim3 tamGrid(N/chunk2, N/chunk2); //Grid dimensión dim3 tamBlock(chunk2,1,1); //Block dimensión // lanzamiento del kernel hipLaunchKernelGGL(( MatrixSumKernel_4), dim3(tamGrid), dim3(tamBlock), 0, 0, M, A_dev, SumPar_dev); hipDeviceSynchronize(); clockStop("kernel 4"); break; }case 5:{ // configuración de la ejecución int chunk2 = 16; dim3 tamGrid(N/chunk2, N/chunk2); //Grid dimensión dim3 tamBlock(chunk2,1,1); //Block dimensión // lanzamiento del kernel hipLaunchKernelGGL(( MatrixSumKernel_5), dim3(tamGrid), dim3(tamBlock), 0, 0, M, A_dev, SumPar_dev); hipDeviceSynchronize(); clockStop("kernel 5"); break; }case 6:{ // configuración de la ejecución int chunk2 = 32; dim3 tamGrid(1, N); //Grid dimensión dim3 tamBlock(chunk2,1,1); //Block dimensión // lanzamiento del kernel hipLaunchKernelGGL(( MatrixSumKernel_6), dim3(tamGrid), dim3(tamBlock),chunk2, 0, M, A_dev, SumPar_dev); hipDeviceSynchronize(); clockStop("kernel 6"); break; } } // Traer resultado; //clockStart(); hipMemcpy(SumPar_hst, SumPar_dev, size2, hipMemcpyDeviceToHost); //clockStop("transf CPU <- GPU"); // Sumar el vector de resultados parciales; float total = 0.0; for (int i = 0; i<N ; i++) total += SumPar_hst[i]; free(SumPar_hst); // Free matrices en device hipFree(A_dev); hipFree(SumPar_dev); return total; } float ejecutarCPU(float * A, int N){ clockStart(); float result_ref = sum_matrix(A,N); clockStop("CPU"); return result_ref; } int main(int argc, char** argv){ int n; int algo; float *A; if (argc < 3){ if (argc < 2){ printf("Sin Parametros, asume tamaño 1024\n"); n=1024; }else{ n= atoi(argv[1]); } A = (float *)malloc(n*n*sizeof(float)); ejecutarCPU(A,n); sumaColMatriz(n,n,A,0); sumaColMatriz(n,n,A,1); sumaColMatriz(n,n,A,2); sumaColMatriz(n,n,A,3); sumaColMatriz(n,n,A,4); sumaColMatriz(n,n,A,5); 
sumaColMatriz(n,n,A,6); exit(0); } n= atoi(argv[1]); algo = atoi(argv[2]); A = (float *)malloc(n*n*sizeof(float)); init_matrix(A,n); float result_ref = ejecutarCPU(A,n); float result_gpu = sumaColMatriz(n,n,A,algo); if (result_gpu == result_ref){ printf("\n\nResultado OK!! :)\n\n\n"); }else{ printf("\nSegui participando\n\n"); } printf("GPU -> %f \n",result_gpu); printf("CPU -> %f \n\n",result_ref); free(A); return 0; }
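// MatrixSumKernel_6 above finishes each block by letting thread 0 add the shared array serially,
// and its launch passes chunk2 bytes of dynamic shared memory even though Nds holds blockDim.x
// floats, so the allocation appears short by a factor of sizeof(float). A common alternative is a
// tree reduction launched with blockDim.x*sizeof(float) of shared memory; the standalone CUDA
// sketch below (same runtime calls as the .cu twin of this file) shows that pattern.
// block_reduce_sum and the all-ones input are my names/choices, not code from the file above.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void block_reduce_sum(const float* in, float* out, int n) {
    extern __shared__ float sdata[];
    unsigned tid = threadIdx.x;
    unsigned i = blockIdx.x * blockDim.x + tid;
    sdata[tid] = (i < n) ? in[i] : 0.0f;                 // one element per thread, 0 past the end
    __syncthreads();
    for (unsigned s = blockDim.x / 2; s > 0; s >>= 1) {  // tree reduction; assumes power-of-two blockDim
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = sdata[0];            // one partial sum per block
}

int main() {
    const int n = 1024, block = 256, grid = n / block;
    float *in_d = nullptr, *out_d = nullptr;
    cudaMalloc(&in_d, n * sizeof(float));
    cudaMalloc(&out_d, grid * sizeof(float));
    float ones[n];
    for (int i = 0; i < n; ++i) ones[i] = 1.0f;
    cudaMemcpy(in_d, ones, n * sizeof(float), cudaMemcpyHostToDevice);
    // Dynamic shared memory must hold blockDim.x floats.
    block_reduce_sum<<<grid, block, block * sizeof(float)>>>(in_d, out_d, n);
    float partial[grid];
    cudaMemcpy(partial, out_d, grid * sizeof(float), cudaMemcpyDeviceToHost);
    float total = 0.0f;
    for (int b = 0; b < grid; ++b) total += partial[b];
    std::printf("sum = %f (expected %d)\n", total, n);
    cudaFree(in_d);
    cudaFree(out_d);
    return 0;
}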
403a40f14f67e287c00661d7544b977801d5071c.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "util.h" using namespace std; #define chunk 256 void clockStart(); void clockStop(const char * str); float sum_matrix(const float *M, int width); void print_matrix(const float *M, int width); void clean_matrix(float *M, int width); void init_matrix(float *M, int width); //Kernel // Suma por columnas de una matriz con un solo bloque __global__ void MatrixSumKernel_0(int M, float* A_dev, float* SumPar_dev){ // Pvalue es usado para el valor intermedio double Pvalue = 0; int offset = threadIdx.y * M; for (int k = 0; k < M; k++) { Pvalue = Pvalue + A_dev[offset+k]; } SumPar_dev[threadIdx.y] = Pvalue; } __global__ void MatrixSumKernel_1(int M, float* A_dev, float* SumPar_dev){ // Pvalue es usado para el valor intermedio float Pvalue = 0; int columna = blockIdx.x; int fCol = columna * gridDim.x; for (int k = 0; k < M; k++) { Pvalue = Pvalue + A_dev[fCol+k]; } SumPar_dev[blockIdx.x] = Pvalue; } __global__ void MatrixSumKernel_2(int M,float* A_dev, float* SumPar_dev){ float parcial = 0; int nCol = blockIdx.y; int fCol = nCol * gridDim.y; int pasos = M/ blockDim.x; int step = fCol + threadIdx.x*pasos; if(threadIdx.x<M){ for (int k = 0; k < pasos; ++k) { parcial = parcial + A_dev[step + k]; } atomicAdd(&(SumPar_dev[nCol]), parcial); } } __global__ void MatrixSumKernel_3(int M,float* A_dev, float* SumPar_dev){ int pasos; if(blockDim.x>M){ pasos = 1; }else{ pasos = M / blockDim.x; } float parcial = 0; int nCol = blockIdx.y; int fCol = nCol * gridDim.y; int step = fCol + threadIdx.x; if(threadIdx.x<M){ for (int k = 0; k < pasos; ++k) { parcial = parcial + A_dev[step + k*blockDim.x]; } atomicAdd(&(SumPar_dev[nCol]), parcial); } } __global__ void MatrixSumKernel_4(int M,float* A_dev, float* SumPar_dev){ // Cant Filas por bloque int rowXblock = M / gridDim.y; // Cant Columnas por bloque int colXblock = M / gridDim.x; //Cant Columnas que le conrresponden a un thread int colXthread = colXblock / blockDim.x ; float parcial; int nCol; int paso; for (int j = 1; j <= colXthread; ++j) { parcial = 0; nCol = blockIdx.x * colXblock + threadIdx.x * j; paso = nCol * M + rowXblock * blockIdx.y; for (int k = 0; k < rowXblock; ++k) { parcial = parcial + A_dev[paso + k]; } atomicAdd(&(SumPar_dev[nCol]), parcial); } } __global__ void MatrixSumKernel_5(int M,float* A_dev, float* SumPar_dev){ // Cant Filas por bloque int rowXblock = M / gridDim.y; // Cant Columnas por bloque int colXblock = M / gridDim.x; //Cant Filas que le conrresponden a un thread int rowXthread = rowXblock / blockDim.x ; float parcial; int nCol; int paso; for (int j = 0; j < colXblock; ++j) { parcial = 0; nCol = blockIdx.x * colXblock + j; paso = nCol * M + rowXblock * blockIdx.y; //For que suma la columna for (int k = 0; k < rowXthread; ++k) { int step = paso + threadIdx.x + k * blockDim.x; parcial = parcial + A_dev[step]; } atomicAdd(&(SumPar_dev[nCol]), parcial); } } __global__ void MatrixSumKernel_6(int M,float* A_dev, float* SumPar_dev){ extern __shared__ float Nds[]; int pasos; if(blockDim.x>M){ pasos = 1; }else{ pasos = M / blockDim.x; } float parcial = 0; int nCol = blockIdx.y; int fCol = nCol * gridDim.y; int step = fCol + threadIdx.x; if(threadIdx.x<M){ for (int k = 0; k < pasos; ++k) { parcial = parcial + A_dev[step + k*blockDim.x]; } }else{ parcial = 0; } Nds[threadIdx.x] = parcial; __syncthreads(); if (threadIdx.x == 0){ for (int i = 1; i < blockDim.x; ++i) { Nds[0] = Nds[0]+Nds[i]; } SumPar_dev[nCol] = Nds[0]; } } //extern "C" float sumaColMatriz(int 
M, int N, float * A_hst, int algoritmo){ size_t size = M * N * sizeof(float); size_t size2 = N*sizeof(float); float* A_dev, *SumPar_dev; float *SumPar_hst = (float *)malloc(N*sizeof(float)); // Allocate en device cudaMalloc(&A_dev, size); cudaMalloc(&SumPar_dev, size2); // Inicializo matrices en el device //clockStart(); cudaMemcpy(A_dev, A_hst, size, cudaMemcpyHostToDevice); cudaMemset(SumPar_dev,0, size2); //clockStop("transf CPU -> GPU"); clockStart(); switch(algoritmo){ case 0:{ //Configurar la grilla dim3 tamGrid (1, 1); //Grid dimensión dim3 tamBlock(1, N); //Block dimensión MatrixSumKernel_0<<<tamGrid, tamBlock>>>(M, A_dev, SumPar_dev); cudaDeviceSynchronize(); clockStop("kernel 0"); break; }case 1:{ //Configurar la grilla dim3 tamGrid (N, 1); //Grid dimensión dim3 tamBlock(1, 1); //Block dimensión MatrixSumKernel_1<<<tamGrid, tamBlock>>>(M, A_dev, SumPar_dev); cudaDeviceSynchronize(); clockStop("kernel 1"); break; }case 2:{ // configuración de la ejecución int chunk2 = 32; //Configurar la grilla dim3 tamGrid (1, N); //Grid dimensión dim3 tamBlock(chunk2,1, 1); //Block dimensión MatrixSumKernel_2<<<tamGrid, tamBlock>>>(M, A_dev, SumPar_dev); cudaDeviceSynchronize(); clockStop("kernel 2"); break; }case 3:{ // configuración de la ejecución int chunk2 = 32; dim3 tamGrid(1, N); //Grid dimensión dim3 tamBlock(chunk2,1,1); //Block dimensión // lanzamiento del kernel MatrixSumKernel_3<<<tamGrid, tamBlock>>>(M, A_dev, SumPar_dev); cudaDeviceSynchronize(); clockStop("kernel 3"); break; }case 4:{ // configuración de la ejecución int chunk2 = 16; dim3 tamGrid(N/chunk2, N/chunk2); //Grid dimensión dim3 tamBlock(chunk2,1,1); //Block dimensión // lanzamiento del kernel MatrixSumKernel_4<<<tamGrid, tamBlock>>>(M, A_dev, SumPar_dev); cudaDeviceSynchronize(); clockStop("kernel 4"); break; }case 5:{ // configuración de la ejecución int chunk2 = 16; dim3 tamGrid(N/chunk2, N/chunk2); //Grid dimensión dim3 tamBlock(chunk2,1,1); //Block dimensión // lanzamiento del kernel MatrixSumKernel_5<<<tamGrid, tamBlock>>>(M, A_dev, SumPar_dev); cudaDeviceSynchronize(); clockStop("kernel 5"); break; }case 6:{ // configuración de la ejecución int chunk2 = 32; dim3 tamGrid(1, N); //Grid dimensión dim3 tamBlock(chunk2,1,1); //Block dimensión // lanzamiento del kernel MatrixSumKernel_6<<<tamGrid, tamBlock,chunk2>>>(M, A_dev, SumPar_dev); cudaDeviceSynchronize(); clockStop("kernel 6"); break; } } // Traer resultado; //clockStart(); cudaMemcpy(SumPar_hst, SumPar_dev, size2, cudaMemcpyDeviceToHost); //clockStop("transf CPU <- GPU"); // Sumar el vector de resultados parciales; float total = 0.0; for (int i = 0; i<N ; i++) total += SumPar_hst[i]; free(SumPar_hst); // Free matrices en device cudaFree(A_dev); cudaFree(SumPar_dev); return total; } float ejecutarCPU(float * A, int N){ clockStart(); float result_ref = sum_matrix(A,N); clockStop("CPU"); return result_ref; } int main(int argc, char** argv){ int n; int algo; float *A; if (argc < 3){ if (argc < 2){ printf("Sin Parametros, asume tamaño 1024\n"); n=1024; }else{ n= atoi(argv[1]); } A = (float *)malloc(n*n*sizeof(float)); ejecutarCPU(A,n); sumaColMatriz(n,n,A,0); sumaColMatriz(n,n,A,1); sumaColMatriz(n,n,A,2); sumaColMatriz(n,n,A,3); sumaColMatriz(n,n,A,4); sumaColMatriz(n,n,A,5); sumaColMatriz(n,n,A,6); exit(0); } n= atoi(argv[1]); algo = atoi(argv[2]); A = (float *)malloc(n*n*sizeof(float)); init_matrix(A,n); float result_ref = ejecutarCPU(A,n); float result_gpu = sumaColMatriz(n,n,A,algo); if (result_gpu == result_ref){ printf("\n\nResultado OK!! 
:)\n\n\n"); }else{ printf("\nSegui participando\n\n"); } printf("GPU -> %f \n",result_gpu); printf("CPU -> %f \n\n",result_ref); free(A); return 0; }
9daae6ec3e1d0708dcb4715b30c7d1e13d4abe51.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "naiveGmem.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *out = NULL; hipMalloc(&out, XSIZE*YSIZE); float *in = NULL; hipMalloc(&in, XSIZE*YSIZE); const int nx = 1; const int ny = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( naiveGmem), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,nx,ny); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( naiveGmem), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,nx,ny); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( naiveGmem), dim3(gridBlock),dim3(threadBlock), 0, 0, out,in,nx,ny); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
9daae6ec3e1d0708dcb4715b30c7d1e13d4abe51.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "naiveGmem.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); float *in = NULL; cudaMalloc(&in, XSIZE*YSIZE); const int nx = 1; const int ny = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); naiveGmem<<<gridBlock,threadBlock>>>(out,in,nx,ny); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { naiveGmem<<<gridBlock,threadBlock>>>(out,in,nx,ny); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { naiveGmem<<<gridBlock,threadBlock>>>(out,in,nx,ny); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
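// The timing loop in this pair starts a steady_clock, queues 1000 asynchronous launches, and reads
// the clock again without synchronizing, so the clock can stop while launches are still queued and
// the figure mixes launch overhead with only part of the execution time. One hedged alternative is
// CUDA event timing with a synchronize before reading the result; dummy_kernel below is a
// stand-in, not the naiveGmem kernel from these files.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel(float* data) { data[threadIdx.x] += 1.0f; }

int main() {
    float* d = nullptr;
    cudaMalloc(&d, 256 * sizeof(float));
    cudaMemset(d, 0, 256 * sizeof(float));

    cudaEvent_t beg, end;
    cudaEventCreate(&beg);
    cudaEventCreate(&end);

    cudaEventRecord(beg);
    for (int i = 0; i < 1000; ++i) dummy_kernel<<<1, 256>>>(d);
    cudaEventRecord(end);
    cudaEventSynchronize(end);              // wait until all 1000 launches have completed

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, beg, end);    // GPU time elapsed between the two events
    std::printf("avg per launch: %f us\n", ms * 1000.0f / 1000.0f);  // total microseconds / launch count

    cudaEventDestroy(beg);
    cudaEventDestroy(end);
    cudaFree(d);
    return 0;
}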
f3583eb4fca9fbf51b4c363ec80e0ad5f5a17786.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2021 ETH Zurich // // SPDX-License-Identifier: BSL-1.0 // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include <pika/cuda.hpp> #include <pika/execution.hpp> #include <pika/init.hpp> #include <pika/testing.hpp> #include <whip.hpp> #include <cstddef> #include <type_traits> #include <utility> #include <vector> namespace cu = pika::cuda::experimental; namespace ex = pika::execution::experimental; namespace tt = pika::this_thread::experimental; __global__ void kernel(int* p, int i) { p[i] = i * 2; } template <typename Scheduler> inline constexpr bool is_cuda_scheduler_v = std::is_same_v<std::decay_t<Scheduler>, cu::cuda_scheduler>; #define CHECK_CUDA_COMPLETION_SCHEDULER(...) \ static_assert(is_cuda_scheduler_v<decltype(ex::get_completion_scheduler<ex::set_value_t>( \ ex::get_env(__VA_ARGS__)))>) #define CHECK_NOT_CUDA_COMPLETION_SCHEDULER(...) \ static_assert(!is_cuda_scheduler_v<decltype(ex::get_completion_scheduler<ex::set_value_t>( \ ex::get_env(__VA_ARGS__)))>) int pika_main() { pika::scoped_finalize sf; cu::cuda_pool pool{}; cu::cuda_scheduler sched{pool}; // Check that the completion scheduler is correctly set for various senders { auto s = ex::schedule(sched); CHECK_CUDA_COMPLETION_SCHEDULER(s); } { auto s = ex::just() | ex::transfer(sched); CHECK_CUDA_COMPLETION_SCHEDULER(s); } { auto s = ex::schedule(sched) | cu::then_with_stream([](whip::stream_t) {}); CHECK_CUDA_COMPLETION_SCHEDULER(s); } { auto s = ex::schedule(sched) | cu::then_with_cublas([](hipblasHandle_t) {}, HIPBLAS_POINTER_MODE_HOST); CHECK_CUDA_COMPLETION_SCHEDULER(s); } { auto s = ex::schedule(sched) | cu::then_with_cusolver([](hipsolverDnHandle_t) {}); CHECK_CUDA_COMPLETION_SCHEDULER(s); } { auto s = ex::schedule(sched) | cu::then_on_host([]() {}); CHECK_CUDA_COMPLETION_SCHEDULER(s); } { #if !defined(PIKA_HAVE_CUDA) || defined(PIKA_CLANG_VERSION) // This test initializes the thread_pool_scheduler with nullptr only to // avoid it trying to get a thread pool through the default thread pool // handler which is not installed in this test (the pika runtime is not // started). The thread pool is never accessed. auto s = ex::schedule(sched) | cu::then_with_cublas([](hipblasHandle_t) {}, HIPBLAS_POINTER_MODE_HOST) | ex::transfer(ex::thread_pool_scheduler{nullptr}); CHECK_NOT_CUDA_COMPLETION_SCHEDULER(s); #endif } { cu::cuda_scheduler sched{pool}; // This partly tests implementation details. The scheduler is not // guaranteed to return a stream with the exact same priority as given // to the scheduler. It will return a stream with a priority "close to" // the given priority. Currently this means that anything high or higher // maps to high, and anything below high maps to normal. 
PIKA_TEST_EQ( sched.get_next_stream().get_priority(), pika::execution::thread_priority::normal); PIKA_TEST_EQ(ex::with_priority(sched, pika::execution::thread_priority::low) .get_next_stream() .get_priority(), pika::execution::thread_priority::normal); PIKA_TEST_EQ(ex::with_priority(sched, pika::execution::thread_priority::default_) .get_next_stream() .get_priority(), pika::execution::thread_priority::normal); PIKA_TEST_EQ(ex::with_priority(sched, pika::execution::thread_priority::high) .get_next_stream() .get_priority(), pika::execution::thread_priority::high); } { PIKA_TEST(ex::get_forward_progress_guarantee(sched) == ex::forward_progress_guarantee::weakly_parallel); } // Schedule work with the scheduler { cu::enable_user_polling poll("default"); int const n = 1000; int* p; whip::malloc(&p, sizeof(int) * n); cu::cuda_pool pool{}; cu::cuda_scheduler sched{pool}; std::vector<ex::unique_any_sender<>> senders; senders.reserve(n); for (std::size_t i = 0; i < n; ++i) { using pika::execution::thread_priority; senders.push_back(ex::schedule(ex::with_priority( sched, i % 2 ? thread_priority::high : thread_priority::normal)) | cu::then_with_stream([p, i](whip::stream_t stream) { hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, stream, p, i); whip::check_last_error(); })); } // This should use the following: // // tt::sync_wait(ex::when_all_vector(std::move(senders))); // // However, nvcc fails to compile it with an internal compiler error so // we use the less efficient but working manual version of it. for (auto& s : senders) { tt::sync_wait(std::move(s)); } std::vector<int> s(n, 0); whip::memcpy(s.data(), p, sizeof(int) * n, whip::memcpy_device_to_host); whip::free(p); for (int i = 0; i < n; ++i) { PIKA_TEST_EQ(s[i], i * 2); } } return 0; } int main(int argc, char** argv) { PIKA_TEST_EQ(pika::init(pika_main, argc, argv), 0); return 0; }
f3583eb4fca9fbf51b4c363ec80e0ad5f5a17786.cu
// Copyright (c) 2021 ETH Zurich // // SPDX-License-Identifier: BSL-1.0 // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) #include <pika/cuda.hpp> #include <pika/execution.hpp> #include <pika/init.hpp> #include <pika/testing.hpp> #include <whip.hpp> #include <cstddef> #include <type_traits> #include <utility> #include <vector> namespace cu = pika::cuda::experimental; namespace ex = pika::execution::experimental; namespace tt = pika::this_thread::experimental; __global__ void kernel(int* p, int i) { p[i] = i * 2; } template <typename Scheduler> inline constexpr bool is_cuda_scheduler_v = std::is_same_v<std::decay_t<Scheduler>, cu::cuda_scheduler>; #define CHECK_CUDA_COMPLETION_SCHEDULER(...) \ static_assert(is_cuda_scheduler_v<decltype(ex::get_completion_scheduler<ex::set_value_t>( \ ex::get_env(__VA_ARGS__)))>) #define CHECK_NOT_CUDA_COMPLETION_SCHEDULER(...) \ static_assert(!is_cuda_scheduler_v<decltype(ex::get_completion_scheduler<ex::set_value_t>( \ ex::get_env(__VA_ARGS__)))>) int pika_main() { pika::scoped_finalize sf; cu::cuda_pool pool{}; cu::cuda_scheduler sched{pool}; // Check that the completion scheduler is correctly set for various senders { auto s = ex::schedule(sched); CHECK_CUDA_COMPLETION_SCHEDULER(s); } { auto s = ex::just() | ex::transfer(sched); CHECK_CUDA_COMPLETION_SCHEDULER(s); } { auto s = ex::schedule(sched) | cu::then_with_stream([](whip::stream_t) {}); CHECK_CUDA_COMPLETION_SCHEDULER(s); } { auto s = ex::schedule(sched) | cu::then_with_cublas([](cublasHandle_t) {}, CUBLAS_POINTER_MODE_HOST); CHECK_CUDA_COMPLETION_SCHEDULER(s); } { auto s = ex::schedule(sched) | cu::then_with_cusolver([](cusolverDnHandle_t) {}); CHECK_CUDA_COMPLETION_SCHEDULER(s); } { auto s = ex::schedule(sched) | cu::then_on_host([]() {}); CHECK_CUDA_COMPLETION_SCHEDULER(s); } { #if !defined(PIKA_HAVE_CUDA) || defined(PIKA_CLANG_VERSION) // This test initializes the thread_pool_scheduler with nullptr only to // avoid it trying to get a thread pool through the default thread pool // handler which is not installed in this test (the pika runtime is not // started). The thread pool is never accessed. auto s = ex::schedule(sched) | cu::then_with_cublas([](cublasHandle_t) {}, CUBLAS_POINTER_MODE_HOST) | ex::transfer(ex::thread_pool_scheduler{nullptr}); CHECK_NOT_CUDA_COMPLETION_SCHEDULER(s); #endif } { cu::cuda_scheduler sched{pool}; // This partly tests implementation details. The scheduler is not // guaranteed to return a stream with the exact same priority as given // to the scheduler. It will return a stream with a priority "close to" // the given priority. Currently this means that anything high or higher // maps to high, and anything below high maps to normal. 
PIKA_TEST_EQ( sched.get_next_stream().get_priority(), pika::execution::thread_priority::normal); PIKA_TEST_EQ(ex::with_priority(sched, pika::execution::thread_priority::low) .get_next_stream() .get_priority(), pika::execution::thread_priority::normal); PIKA_TEST_EQ(ex::with_priority(sched, pika::execution::thread_priority::default_) .get_next_stream() .get_priority(), pika::execution::thread_priority::normal); PIKA_TEST_EQ(ex::with_priority(sched, pika::execution::thread_priority::high) .get_next_stream() .get_priority(), pika::execution::thread_priority::high); } { PIKA_TEST(ex::get_forward_progress_guarantee(sched) == ex::forward_progress_guarantee::weakly_parallel); } // Schedule work with the scheduler { cu::enable_user_polling poll("default"); int const n = 1000; int* p; whip::malloc(&p, sizeof(int) * n); cu::cuda_pool pool{}; cu::cuda_scheduler sched{pool}; std::vector<ex::unique_any_sender<>> senders; senders.reserve(n); for (std::size_t i = 0; i < n; ++i) { using pika::execution::thread_priority; senders.push_back(ex::schedule(ex::with_priority( sched, i % 2 ? thread_priority::high : thread_priority::normal)) | cu::then_with_stream([p, i](whip::stream_t stream) { kernel<<<1, 1, 0, stream>>>(p, i); whip::check_last_error(); })); } // This should use the following: // // tt::sync_wait(ex::when_all_vector(std::move(senders))); // // However, nvcc fails to compile it with an internal compiler error so // we use the less efficient but working manual version of it. for (auto& s : senders) { tt::sync_wait(std::move(s)); } std::vector<int> s(n, 0); whip::memcpy(s.data(), p, sizeof(int) * n, whip::memcpy_device_to_host); whip::free(p); for (int i = 0; i < n; ++i) { PIKA_TEST_EQ(s[i], i * 2); } } return 0; } int main(int argc, char** argv) { PIKA_TEST_EQ(pika::init(pika_main, argc, argv), 0); return 0; }
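// A minimal end-to-end composition of the facilities this test exercises: create a pool and
// scheduler, run one kernel on the scheduler's stream through then_with_stream, and block on the
// sender with sync_wait. Every call below appears in the test above; only the set_value kernel,
// the value 42, and the single-int allocation are my additions.
#include <pika/cuda.hpp>
#include <pika/execution.hpp>
#include <pika/init.hpp>
#include <whip.hpp>

#include <utility>

namespace cu = pika::cuda::experimental;
namespace ex = pika::execution::experimental;
namespace tt = pika::this_thread::experimental;

__global__ void set_value(int* p) { *p = 42; }

int pika_main() {
    pika::scoped_finalize sf;
    cu::enable_user_polling poll("default");  // polling is what lets sync_wait observe stream completion

    cu::cuda_pool pool{};
    cu::cuda_scheduler sched{pool};

    int* p = nullptr;
    whip::malloc(&p, sizeof(int));

    auto s = ex::schedule(sched) | cu::then_with_stream([p](whip::stream_t stream) {
                 set_value<<<1, 1, 0, stream>>>(p);
                 whip::check_last_error();
             });
    tt::sync_wait(std::move(s));

    int host_value = 0;
    whip::memcpy(&host_value, p, sizeof(int), whip::memcpy_device_to_host);
    whip::free(p);
    return host_value == 42 ? 0 : 1;
}

int main(int argc, char** argv) { return pika::init(pika_main, argc, argv); }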
1e538613d7e37046e552e99616a2a780c4a6f29e.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <time.h> #include <unistd.h> #define CHECK(call) \ { \ const hipError_t error = call; \ if (error != hipSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ hipGetErrorString(error)); \ exit(1); \ } \ } __global__ void gpu_sleep(const int sleep_time) { int tmp = 0; for (int i=sleep_time; i<sleep_time; i++) tmp += i; printf("GPU job threadId (%d) done, sleep for %d seconds.\n", threadIdx.x, sleep_time); } int main(int argc, char **argv) { // set up device. int dev_count; int dev = 0; hipDeviceProp_t dprop; CHECK(hipGetDeviceCount(&dev_count)); CHECK(hipGetDeviceProperties(&dprop, dev)); printf("There are %d devices in the system. \n", dev_count); printf("%s start at device %d: %s \n", argv[0], dev, dprop.name); CHECK(hipSetDevice(dev)); int sleep_time = 1; if (argc > 1) { sleep_time = atoi(argv[1]); } int blocksize = 1; if (argc > 2) { blocksize = atoi(argv[2]); } // execution configuration dim3 block (blocksize); dim3 grid (1); // kernel: sleep. hipLaunchKernelGGL(( gpu_sleep) , dim3(grid), dim3(block), 0, 0, sleep_time); sleep(sleep_time); // reset device. CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
1e538613d7e37046e552e99616a2a780c4a6f29e.cu
#include <cuda_runtime.h> #include <stdio.h> #include <time.h> #include <unistd.h> #define CHECK(call) \ { \ const cudaError_t error = call; \ if (error != cudaSuccess) \ { \ fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \ fprintf(stderr, "code: %d, reason: %s\n", error, \ cudaGetErrorString(error)); \ exit(1); \ } \ } __global__ void gpu_sleep(const int sleep_time) { int tmp = 0; for (int i=sleep_time; i<sleep_time; i++) tmp += i; printf("GPU job threadId (%d) done, sleep for %d seconds.\n", threadIdx.x, sleep_time); } int main(int argc, char **argv) { // set up device. int dev_count; int dev = 0; cudaDeviceProp dprop; CHECK(cudaGetDeviceCount(&dev_count)); CHECK(cudaGetDeviceProperties(&dprop, dev)); printf("There are %d devices in the system. \n", dev_count); printf("%s start at device %d: %s \n", argv[0], dev, dprop.name); CHECK(cudaSetDevice(dev)); int sleep_time = 1; if (argc > 1) { sleep_time = atoi(argv[1]); } int blocksize = 1; if (argc > 2) { blocksize = atoi(argv[2]); } // execution configuration dim3 block (blocksize); dim3 grid (1); // kernel: sleep. gpu_sleep <<<grid, block>>> (sleep_time); sleep(sleep_time); // reset device. CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
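// In gpu_sleep above the loop starts at sleep_time with condition i < sleep_time, so it runs zero
// iterations and the kernel returns immediately; the visible pause comes from the host-side
// sleep(). If a delay on the device itself is wanted, a common pattern is to spin on clock64().
// This is a standalone sketch (gpu_busy_wait is my name), not a drop-in change to the file above.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void gpu_busy_wait(long long cycles) {
    long long start = clock64();
    while (clock64() - start < cycles) { /* spin on the per-SM clock */ }
}

int main() {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    // clockRate is reported in kHz, so roughly clockRate * 1000 cycles pass per second.
    long long one_second = static_cast<long long>(prop.clockRate) * 1000LL;
    gpu_busy_wait<<<1, 1>>>(one_second);
    cudaDeviceSynchronize();   // block the host until the device-side spin finishes
    std::printf("device busy-wait done\n");
    return 0;
}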
0617865bb3d7dc0472fa5cb7e07be877a3200137.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author GS <[email protected]> // #include <system/op_boilerplate.h> #include <array/NDArray.h> #include <execution/Threads.h> #include <helpers/ConstantTadHelper.h> #include "../triangular_solve.h" namespace sd { namespace ops { namespace helpers { /* * lower triangular process for system of linear equations * x_1 = b_1/a_1,1 * x_2 = (b_2 - a_2,1 * x_1) / a_2,2 * x_3 = (b_3 - a_3,1 * x_1 - a_3,2 * x_2) / a_3,3 * ... * x_M = (b_M - a_M,1 * x_1 - ... a_M,M-1 * x_M-1)/ a_M,M * * output == x * a == leftInput * b == rightInput * * */ template <typename T> static _CUDA_HD void lowerTriangularSolve(T const* leftInput, Nd4jLong const* leftInputShape, T const* rightInput, Nd4jLong const* rightInputShape, bool const unitOnDiag, T* output, const Nd4jLong* outputShape, Nd4jLong rows, Nd4jLong cols) { for (auto r = 0; r < rows; r++) { for (auto j = 0; j < cols; j++) { Nd4jLong posY[] = {r, j}; Nd4jLong posX[] = {r, r}; auto xIndex = shape::getOffset(leftInputShape, posX, 0); auto yIndex = shape::getOffset(rightInputShape, posY, 0); auto zIndex = shape::getOffset(outputShape, posY, 0); auto sum = rightInput[yIndex]; for (auto c = 0; c < r; c++) { Nd4jLong posZ[] = {c, j}; Nd4jLong pos[] = {r, c}; auto xcIndex = shape::getOffset(leftInputShape, pos, 0); auto zcIndex = shape::getOffset(outputShape, posZ, 0); sum -= leftInput[xcIndex] * output[zcIndex]; } output[zIndex] = unitOnDiag?sum:sum / leftInput[xIndex]; } } } /* * upper triangular process for system of linear equations * x_M = b_M/a_M,M * x_M-1 = (b_M-1 - a_M-1,M-2 * x_M) / a_M-1,M-1 * x_M-2 = (b_M-2 - a_M-2,M-3 * x_M-2 - a_M-2,M-1 * x_M) / a_3,3 * ... * x_1 = (b_1 - a_1,2 * x_2 - ... 
a_1,M * x_M)/ a_1,1 * * output == x * a == leftInput * b == rightInput * * */ template <typename T> static _CUDA_HD void upperTriangularSolve(T const* leftInput, Nd4jLong const* leftInputShape, T const* rightInput, Nd4jLong const* rightInputShape, bool const unitOnDiag, T* output, const Nd4jLong* outputShape, Nd4jLong rows, Nd4jLong cols) { for (auto r = rows; r > 0; r--) { for (auto j = 0; j < cols; j++) { Nd4jLong posY[] = {r - 1, j}; Nd4jLong posX[] = {r - 1, r - 1}; auto xIndex = shape::getOffset(leftInputShape, posX, 0); auto yIndex = shape::getOffset(rightInputShape, posY, 0); auto zIndex = shape::getOffset(outputShape, posY, 0); auto sum = rightInput[yIndex]; for (auto c = r; c < rows; c++) { Nd4jLong posZ[] = {c, j}; Nd4jLong pos[] = {r - 1, c}; auto zcIndex = shape::getOffset(outputShape, posZ, 0); auto xcIndex = shape::getOffset(leftInputShape, pos, 0); sum -= leftInput[xcIndex] * output[zcIndex]; } output[zIndex] = unitOnDiag?sum:sum / leftInput[xIndex]; } } } template <typename T> static __global__ void triangularSolveKernel(T const* leftInput, Nd4jLong const* leftPartShape, T const* rightInput, Nd4jLong const* rightPartShape, bool const lower, bool const unitsOnDiag, T* output, const Nd4jLong* outputShape, const Nd4jLong* tadLeftShape, const Nd4jLong* tadLeftOffset, const Nd4jLong* tadRightShape, const Nd4jLong* tadRightOffset, const Nd4jLong* tadOutputShape, const Nd4jLong* tadOutputOffset, Nd4jLong batchNum) { __shared__ Nd4jLong rows; __shared__ Nd4jLong cols; if (threadIdx.x == 0) { rows = shape::sizeAt(leftPartShape, -2); cols = shape::sizeAt(rightPartShape, -1); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto stop = batchNum; auto increment = blockDim.x * gridDim.x; for (auto i = start; i < stop; i += increment) { auto pLeftPart = leftInput + tadLeftOffset[i]; auto pRightPart = rightInput + tadRightOffset[i]; auto pOutputPart = output + tadOutputOffset[i]; if (lower) { lowerTriangularSolve<T>(pLeftPart, tadLeftShape, pRightPart, tadRightShape, unitsOnDiag, pOutputPart, tadOutputShape, rows, cols); } else { upperTriangularSolve<T>(pLeftPart, tadLeftShape, pRightPart, tadRightShape, unitsOnDiag, pOutputPart, tadOutputShape, rows, cols); } } } template <typename T> static int triangularSolveFunctor_(sd::LaunchContext * context, NDArray* leftInput, NDArray* rightInput, bool lower, bool unitsOnDiag, NDArray* output) { NDArray::prepareSpecialUse({output}, {leftInput, rightInput}); auto leftTads = ConstantTadHelper::getInstance().tadForDimensions(leftInput->shapeInfo(), {-2, -1}); auto rightTads = ConstantTadHelper::getInstance().tadForDimensions(rightInput->shapeInfo(), {-2, -1}); auto outputTads = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {-2, -1}); auto stream = context->getCudaStream(); T const* leftBuf = reinterpret_cast<T const*>(leftInput->specialBuffer()); T const* rightBuf = reinterpret_cast<T const*>(rightInput->specialBuffer()); T* outputBuf = reinterpret_cast<T*>(output->specialBuffer()); hipLaunchKernelGGL(( triangularSolveKernel<T>), dim3(128), dim3(128), 256, *stream, leftBuf, leftInput->specialShapeInfo(), rightBuf, rightInput->specialShapeInfo(), lower, unitsOnDiag, outputBuf, output->specialShapeInfo(), leftTads.specialShapeInfo(), leftTads.specialOffsets(), rightTads.specialShapeInfo(), rightTads.specialOffsets(), outputTads.specialShapeInfo(), outputTads.specialOffsets(), leftTads.numberOfTads()); NDArray::registerSpecialUse({output}, {leftInput, rightInput}); return Status::OK(); } /// 
triangularSolve2D - 2D implementation of triangularSolveFunctor /// \tparam T - type of NDArray output /// \param context - launch context pointer /// \param leftInput - T matrix of equation Tx = b /// \param rightInput - b vector of equation Tx = b /// \param lower - lower or upper triangular matrix /// \param unitsOnDiag - solve for case when only units (1.0) on diagonal is assumed /// \param output - output vector (x on equation Tx = b) /// template <typename T> ND4J_LOCAL void triangularSolve2D(sd::LaunchContext* context, const NDArray& leftInput, const NDArray& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output) { triangularSolveFunctor_<T>(context, const_cast<NDArray*>(&leftInput), const_cast<NDArray*>(&rightInput), lower, unitsOnDiag, &output); // leftInput.syncToHost(); rightInput.syncToHost(); output.syncToHost(); // T const* pLeftPart = (T const*)leftInput.getBuffer(); // T const* pRightPart = (T const*)rightInput.getBuffer(); // T* pOutputPart = (T*)output.buffer(); // auto rows = leftInput.rows(); // auto cols = leftInput.columns(); // if (lower) { // lowerTriangularSolve<T>(pLeftPart, leftInput.shapeInfo(), pRightPart, rightInput.shapeInfo(), unitsOnDiag, pOutputPart, output.shapeInfo(), rows, cols); // } else { // upperTriangularSolve<T>(pLeftPart, leftInput.shapeInfo(), pRightPart, rightInput.shapeInfo(), unitsOnDiag, pOutputPart, output.shapeInfo(), rows, cols); // } // output.syncToDevice(); } BUILD_SINGLE_TEMPLATE(template ND4J_LOCAL void triangularSolve2D, (sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output), FLOAT_TYPES); // template void triangularSolve2D<float>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output); // template void triangularSolve2D<bfloat16>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output); // template void triangularSolve2D<float16>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output); // template void triangularSolve2D<double>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output); ND4J_LOCAL int triangularSolveFunctor(sd::LaunchContext * context, NDArray* leftInput, NDArray* rightInput, bool lower, bool unitsOnDiag, NDArray* output) { BUILD_SINGLE_SELECTOR(leftInput->dataType(), return triangularSolveFunctor_, (context, leftInput, rightInput, lower, unitsOnDiag, output), FLOAT_NATIVE); } template <typename T> static __global__ void upperAdjointKernel(T const* input, T* output, Nd4jLong batchSize, Nd4jLong rows, Nd4jLong columns, Nd4jLong const* inputTads, Nd4jLong const* inputOffsets, Nd4jLong const* outputTads, Nd4jLong const* outputOffsets) { for (auto b = blockIdx.x; b < batchSize; b += gridDim.x) { auto inputPart = input + inputOffsets[b]; auto outputPart = output + outputOffsets[b]; for (auto r = threadIdx.x; r < rows; r += blockDim.x) { for (auto c = threadIdx.y; c <= r; c += blockDim.y) { Nd4jLong zPos[] = {r, c}; Nd4jLong xPos[] = {c, r}; auto zIndex = shape::getOffset(outputTads, zPos); auto xIndex = shape::getOffset(inputTads, xPos); outputPart[zIndex] = inputPart[xIndex]; } } } } template <typename T> static __global__ void lowerAdjointKernel(T const* input, T* output, Nd4jLong batchSize, 
Nd4jLong rows, Nd4jLong columns, Nd4jLong const* inputTads, Nd4jLong const* inputOffsets, Nd4jLong const* outputTads, Nd4jLong const* outputOffsets) { for (auto b = blockIdx.x; b < batchSize; b += gridDim.x) { auto inputPart = input + inputOffsets[b]; auto outputPart = output + outputOffsets[b]; for (auto r = threadIdx.x; r < rows; r += blockDim.x) { for (auto c = r + threadIdx.y; c < columns; c += blockDim.y) { Nd4jLong zPos[] = {r, c}; Nd4jLong xPos[] = {c, r}; auto zIndex = shape::getOffset(outputTads, zPos); auto xIndex = shape::getOffset(inputTads, xPos); outputPart[zIndex] = inputPart[xIndex]; } } } } template <typename T> static void adjointTriangularMatrix_(sd::LaunchContext* context, NDArray const* input, bool const lower, NDArray* output) { auto inputTads = ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {-2, -1}); auto outputTads = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {-2, -1}); auto stream = context->getCudaStream(); auto inputBuf = reinterpret_cast<T const*>(input->specialBuffer()); auto outputBuf = reinterpret_cast<T*>(output->specialBuffer()); auto rows = input->sizeAt(-2); auto columns = input->sizeAt(-1); if (lower) { hipLaunchKernelGGL(( lowerAdjointKernel<T>), dim3(128), dim3(256), 256, *stream, inputBuf, outputBuf, outputTads.numberOfTads(), rows, columns, inputTads.specialShapeInfo(), inputTads.specialOffsets(), outputTads.specialShapeInfo(), outputTads.specialOffsets()); } else { hipLaunchKernelGGL(( upperAdjointKernel<T>), dim3(128), dim3(256), 256, *stream, inputBuf, outputBuf, outputTads.numberOfTads(), rows, columns, inputTads.specialShapeInfo(), inputTads.specialOffsets(), outputTads.specialShapeInfo(), outputTads.specialOffsets()); } } ND4J_LOCAL void adjointMatrix(sd::LaunchContext* context, NDArray const* input, bool const lower, NDArray* output) { BUILD_SINGLE_SELECTOR(input->dataType(), adjointTriangularMatrix_, (context, input, lower, output), FLOAT_NATIVE); } /* ////////////////////////////////////////////////////////////////////////// template <typename T> void triangularSolve2D(sd::LaunchContext* context, NDArray const& A, NDArray const& b, bool const lower, bool const unitsOnDiag, NDArray& x) { if(A.rankOf() != 2) throw std::runtime_error("triangularSolve2D: input matrix A must be 2D !"); int temp; const bool isBvector = b.isCommonVector(temp); const bool isXvector = x.isCommonVector(temp); if(A.sizeAt(0) != (isBvector ? b.lengthOf() : b.sizeAt(0))) throw std::runtime_error("triangularSolve2D: A and b must have the same number of rows !"); if(A.sizeAt(1) != (isXvector ? x.lengthOf() : x.sizeAt(0))) throw std::runtime_error("triangularSolve2D: columns number of array A must be equal to rows number of array x !"); if(isBvector) { if(lower) { for (int i = 0; i < A.sizeAt(0); ++i) { T sum = b.t<T>(i); for (int j = 0; j < i; ++j) sum -= A.t<T>(i,j) * x.t<T>(j); x.r<T>(i) = unitsOnDiag ? sum : sum / A.t<T>(i,i); } } else { for (int i = A.sizeAt(0) - 1; i >= 0; --i) { T sum = b.t<T>(i); for (int j = i + 1; j < A.sizeAt(1); ++j) sum -= A.t<T>(i,j) * x.t<T>(j); x.r<T>(i) = unitsOnDiag ? sum : sum / A.t<T>(i,i); } } } else { if(lower) { for (int bCol = 0; bCol < b.sizeAt(1); ++bCol) { for (int i = 0; i < A.sizeAt(0); ++i) { T sum = b.t<T>(i, bCol); for (int j = 0; j < i; ++j) sum -= A.t<T>(i,j) * x.t<T>(j, bCol); x.r<T>(i, bCol) = unitsOnDiag ? 
sum : sum / A.t<T>(i,i); } } } else { for (int bCol = 0; bCol < b.sizeAt(1); ++bCol) { for (int i = A.sizeAt(0) - 1; i >= 0; --i) { T sum = b.t<T>(i, bCol); for (int j = i + 1; j < A.sizeAt(1); ++j) sum -= A.t<T>(i,j) * x.t<T>(j, bCol); x.r<T>(i, bCol) = unitsOnDiag ? sum : sum / A.t<T>(i,i); } } } } } BUILD_SINGLE_TEMPLATE(template void triangularSolve2D, (sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output), FLOAT_TYPES); */ } } }
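// The comment block at the top of this file states the forward-substitution recurrence that
// lowerTriangularSolve implements (x_1 = b_1/a_1,1, x_2 = (b_2 - a_2,1 * x_1)/a_2,2, ...).
// Here is that recurrence on a concrete 3x3 host-side example, independent of the NDArray and TAD
// machinery; forward_substitute and the sample matrix are mine, not code from this file.
#include <cstdio>

// Solve L x = b for a lower-triangular n x n matrix L stored row-major.
static void forward_substitute(const double* L, const double* b, double* x, int n) {
    for (int i = 0; i < n; ++i) {
        double sum = b[i];
        for (int j = 0; j < i; ++j)
            sum -= L[i * n + j] * x[j];  // subtract contributions of already-solved unknowns
        x[i] = sum / L[i * n + i];       // divide by the diagonal (skipped when unitOnDiag is true)
    }
}

int main() {
    const double L[9] = { 2, 0, 0,
                          1, 3, 0,
                          4, 5, 6 };
    const double b[3] = { 2, 7, 32 };
    double x[3];
    forward_substitute(L, b, x, 3);
    std::printf("x = {%g, %g, %g}\n", x[0], x[1], x[2]);  // expected {1, 2, 3}
    return 0;
}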
0617865bb3d7dc0472fa5cb7e07be877a3200137.cu
/* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author GS <[email protected]> // #include <system/op_boilerplate.h> #include <array/NDArray.h> #include <execution/Threads.h> #include <helpers/ConstantTadHelper.h> #include "../triangular_solve.h" namespace sd { namespace ops { namespace helpers { /* * lower triangular process for system of linear equations * x_1 = b_1/a_1,1 * x_2 = (b_2 - a_2,1 * x_1) / a_2,2 * x_3 = (b_3 - a_3,1 * x_1 - a_3,2 * x_2) / a_3,3 * ... * x_M = (b_M - a_M,1 * x_1 - ... a_M,M-1 * x_M-1)/ a_M,M * * output == x * a == leftInput * b == rightInput * * */ template <typename T> static _CUDA_HD void lowerTriangularSolve(T const* leftInput, Nd4jLong const* leftInputShape, T const* rightInput, Nd4jLong const* rightInputShape, bool const unitOnDiag, T* output, const Nd4jLong* outputShape, Nd4jLong rows, Nd4jLong cols) { for (auto r = 0; r < rows; r++) { for (auto j = 0; j < cols; j++) { Nd4jLong posY[] = {r, j}; Nd4jLong posX[] = {r, r}; auto xIndex = shape::getOffset(leftInputShape, posX, 0); auto yIndex = shape::getOffset(rightInputShape, posY, 0); auto zIndex = shape::getOffset(outputShape, posY, 0); auto sum = rightInput[yIndex]; for (auto c = 0; c < r; c++) { Nd4jLong posZ[] = {c, j}; Nd4jLong pos[] = {r, c}; auto xcIndex = shape::getOffset(leftInputShape, pos, 0); auto zcIndex = shape::getOffset(outputShape, posZ, 0); sum -= leftInput[xcIndex] * output[zcIndex]; } output[zIndex] = unitOnDiag?sum:sum / leftInput[xIndex]; } } } /* * upper triangular process for system of linear equations * x_M = b_M/a_M,M * x_M-1 = (b_M-1 - a_M-1,M-2 * x_M) / a_M-1,M-1 * x_M-2 = (b_M-2 - a_M-2,M-3 * x_M-2 - a_M-2,M-1 * x_M) / a_3,3 * ... * x_1 = (b_1 - a_1,2 * x_2 - ... 
a_1,M * x_M)/ a_1,1 * * output == x * a == leftInput * b == rightInput * * */ template <typename T> static _CUDA_HD void upperTriangularSolve(T const* leftInput, Nd4jLong const* leftInputShape, T const* rightInput, Nd4jLong const* rightInputShape, bool const unitOnDiag, T* output, const Nd4jLong* outputShape, Nd4jLong rows, Nd4jLong cols) { for (auto r = rows; r > 0; r--) { for (auto j = 0; j < cols; j++) { Nd4jLong posY[] = {r - 1, j}; Nd4jLong posX[] = {r - 1, r - 1}; auto xIndex = shape::getOffset(leftInputShape, posX, 0); auto yIndex = shape::getOffset(rightInputShape, posY, 0); auto zIndex = shape::getOffset(outputShape, posY, 0); auto sum = rightInput[yIndex]; for (auto c = r; c < rows; c++) { Nd4jLong posZ[] = {c, j}; Nd4jLong pos[] = {r - 1, c}; auto zcIndex = shape::getOffset(outputShape, posZ, 0); auto xcIndex = shape::getOffset(leftInputShape, pos, 0); sum -= leftInput[xcIndex] * output[zcIndex]; } output[zIndex] = unitOnDiag?sum:sum / leftInput[xIndex]; } } } template <typename T> static __global__ void triangularSolveKernel(T const* leftInput, Nd4jLong const* leftPartShape, T const* rightInput, Nd4jLong const* rightPartShape, bool const lower, bool const unitsOnDiag, T* output, const Nd4jLong* outputShape, const Nd4jLong* tadLeftShape, const Nd4jLong* tadLeftOffset, const Nd4jLong* tadRightShape, const Nd4jLong* tadRightOffset, const Nd4jLong* tadOutputShape, const Nd4jLong* tadOutputOffset, Nd4jLong batchNum) { __shared__ Nd4jLong rows; __shared__ Nd4jLong cols; if (threadIdx.x == 0) { rows = shape::sizeAt(leftPartShape, -2); cols = shape::sizeAt(rightPartShape, -1); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto stop = batchNum; auto increment = blockDim.x * gridDim.x; for (auto i = start; i < stop; i += increment) { auto pLeftPart = leftInput + tadLeftOffset[i]; auto pRightPart = rightInput + tadRightOffset[i]; auto pOutputPart = output + tadOutputOffset[i]; if (lower) { lowerTriangularSolve<T>(pLeftPart, tadLeftShape, pRightPart, tadRightShape, unitsOnDiag, pOutputPart, tadOutputShape, rows, cols); } else { upperTriangularSolve<T>(pLeftPart, tadLeftShape, pRightPart, tadRightShape, unitsOnDiag, pOutputPart, tadOutputShape, rows, cols); } } } template <typename T> static int triangularSolveFunctor_(sd::LaunchContext * context, NDArray* leftInput, NDArray* rightInput, bool lower, bool unitsOnDiag, NDArray* output) { NDArray::prepareSpecialUse({output}, {leftInput, rightInput}); auto leftTads = ConstantTadHelper::getInstance().tadForDimensions(leftInput->shapeInfo(), {-2, -1}); auto rightTads = ConstantTadHelper::getInstance().tadForDimensions(rightInput->shapeInfo(), {-2, -1}); auto outputTads = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {-2, -1}); auto stream = context->getCudaStream(); T const* leftBuf = reinterpret_cast<T const*>(leftInput->specialBuffer()); T const* rightBuf = reinterpret_cast<T const*>(rightInput->specialBuffer()); T* outputBuf = reinterpret_cast<T*>(output->specialBuffer()); triangularSolveKernel<T><<<128, 128, 256, *stream>>>(leftBuf, leftInput->specialShapeInfo(), rightBuf, rightInput->specialShapeInfo(), lower, unitsOnDiag, outputBuf, output->specialShapeInfo(), leftTads.specialShapeInfo(), leftTads.specialOffsets(), rightTads.specialShapeInfo(), rightTads.specialOffsets(), outputTads.specialShapeInfo(), outputTads.specialOffsets(), leftTads.numberOfTads()); NDArray::registerSpecialUse({output}, {leftInput, rightInput}); return Status::OK(); } /// triangularSolve2D - 2D implementation of 
triangularSolveFunctor /// \tparam T - type of NDArray output /// \param context - launch context pointer /// \param leftInput - T matrix of equation Tx = b /// \param rightInput - b vector of equation Tx = b /// \param lower - lower or upper triangular matrix /// \param unitsOnDiag - solve for case when only units (1.0) on diagonal is assumed /// \param output - output vector (x on equation Tx = b) /// template <typename T> ND4J_LOCAL void triangularSolve2D(sd::LaunchContext* context, const NDArray& leftInput, const NDArray& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output) { triangularSolveFunctor_<T>(context, const_cast<NDArray*>(&leftInput), const_cast<NDArray*>(&rightInput), lower, unitsOnDiag, &output); // leftInput.syncToHost(); rightInput.syncToHost(); output.syncToHost(); // T const* pLeftPart = (T const*)leftInput.getBuffer(); // T const* pRightPart = (T const*)rightInput.getBuffer(); // T* pOutputPart = (T*)output.buffer(); // auto rows = leftInput.rows(); // auto cols = leftInput.columns(); // if (lower) { // lowerTriangularSolve<T>(pLeftPart, leftInput.shapeInfo(), pRightPart, rightInput.shapeInfo(), unitsOnDiag, pOutputPart, output.shapeInfo(), rows, cols); // } else { // upperTriangularSolve<T>(pLeftPart, leftInput.shapeInfo(), pRightPart, rightInput.shapeInfo(), unitsOnDiag, pOutputPart, output.shapeInfo(), rows, cols); // } // output.syncToDevice(); } BUILD_SINGLE_TEMPLATE(template ND4J_LOCAL void triangularSolve2D, (sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output), FLOAT_TYPES); // template void triangularSolve2D<float>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output); // template void triangularSolve2D<bfloat16>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output); // template void triangularSolve2D<float16>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output); // template void triangularSolve2D<double>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output); ND4J_LOCAL int triangularSolveFunctor(sd::LaunchContext * context, NDArray* leftInput, NDArray* rightInput, bool lower, bool unitsOnDiag, NDArray* output) { BUILD_SINGLE_SELECTOR(leftInput->dataType(), return triangularSolveFunctor_, (context, leftInput, rightInput, lower, unitsOnDiag, output), FLOAT_NATIVE); } template <typename T> static __global__ void upperAdjointKernel(T const* input, T* output, Nd4jLong batchSize, Nd4jLong rows, Nd4jLong columns, Nd4jLong const* inputTads, Nd4jLong const* inputOffsets, Nd4jLong const* outputTads, Nd4jLong const* outputOffsets) { for (auto b = blockIdx.x; b < batchSize; b += gridDim.x) { auto inputPart = input + inputOffsets[b]; auto outputPart = output + outputOffsets[b]; for (auto r = threadIdx.x; r < rows; r += blockDim.x) { for (auto c = threadIdx.y; c <= r; c += blockDim.y) { Nd4jLong zPos[] = {r, c}; Nd4jLong xPos[] = {c, r}; auto zIndex = shape::getOffset(outputTads, zPos); auto xIndex = shape::getOffset(inputTads, xPos); outputPart[zIndex] = inputPart[xIndex]; } } } } template <typename T> static __global__ void lowerAdjointKernel(T const* input, T* output, Nd4jLong batchSize, Nd4jLong rows, Nd4jLong columns, Nd4jLong 
const* inputTads, Nd4jLong const* inputOffsets, Nd4jLong const* outputTads, Nd4jLong const* outputOffsets) { for (auto b = blockIdx.x; b < batchSize; b += gridDim.x) { auto inputPart = input + inputOffsets[b]; auto outputPart = output + outputOffsets[b]; for (auto r = threadIdx.x; r < rows; r += blockDim.x) { for (auto c = r + threadIdx.y; c < columns; c += blockDim.y) { Nd4jLong zPos[] = {r, c}; Nd4jLong xPos[] = {c, r}; auto zIndex = shape::getOffset(outputTads, zPos); auto xIndex = shape::getOffset(inputTads, xPos); outputPart[zIndex] = inputPart[xIndex]; } } } } template <typename T> static void adjointTriangularMatrix_(sd::LaunchContext* context, NDArray const* input, bool const lower, NDArray* output) { auto inputTads = ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {-2, -1}); auto outputTads = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {-2, -1}); auto stream = context->getCudaStream(); auto inputBuf = reinterpret_cast<T const*>(input->specialBuffer()); auto outputBuf = reinterpret_cast<T*>(output->specialBuffer()); auto rows = input->sizeAt(-2); auto columns = input->sizeAt(-1); if (lower) { lowerAdjointKernel<T><<<128, 256, 256, *stream>>>(inputBuf, outputBuf, outputTads.numberOfTads(), rows, columns, inputTads.specialShapeInfo(), inputTads.specialOffsets(), outputTads.specialShapeInfo(), outputTads.specialOffsets()); } else { upperAdjointKernel<T><<<128, 256, 256, *stream>>>(inputBuf, outputBuf, outputTads.numberOfTads(), rows, columns, inputTads.specialShapeInfo(), inputTads.specialOffsets(), outputTads.specialShapeInfo(), outputTads.specialOffsets()); } } ND4J_LOCAL void adjointMatrix(sd::LaunchContext* context, NDArray const* input, bool const lower, NDArray* output) { BUILD_SINGLE_SELECTOR(input->dataType(), adjointTriangularMatrix_, (context, input, lower, output), FLOAT_NATIVE); } /* ////////////////////////////////////////////////////////////////////////// template <typename T> void triangularSolve2D(sd::LaunchContext* context, NDArray const& A, NDArray const& b, bool const lower, bool const unitsOnDiag, NDArray& x) { if(A.rankOf() != 2) throw std::runtime_error("triangularSolve2D: input matrix A must be 2D !"); int temp; const bool isBvector = b.isCommonVector(temp); const bool isXvector = x.isCommonVector(temp); if(A.sizeAt(0) != (isBvector ? b.lengthOf() : b.sizeAt(0))) throw std::runtime_error("triangularSolve2D: A and b must have the same number of rows !"); if(A.sizeAt(1) != (isXvector ? x.lengthOf() : x.sizeAt(0))) throw std::runtime_error("triangularSolve2D: columns number of array A must be equal to rows number of array x !"); if(isBvector) { if(lower) { for (int i = 0; i < A.sizeAt(0); ++i) { T sum = b.t<T>(i); for (int j = 0; j < i; ++j) sum -= A.t<T>(i,j) * x.t<T>(j); x.r<T>(i) = unitsOnDiag ? sum : sum / A.t<T>(i,i); } } else { for (int i = A.sizeAt(0) - 1; i >= 0; --i) { T sum = b.t<T>(i); for (int j = i + 1; j < A.sizeAt(1); ++j) sum -= A.t<T>(i,j) * x.t<T>(j); x.r<T>(i) = unitsOnDiag ? sum : sum / A.t<T>(i,i); } } } else { if(lower) { for (int bCol = 0; bCol < b.sizeAt(1); ++bCol) { for (int i = 0; i < A.sizeAt(0); ++i) { T sum = b.t<T>(i, bCol); for (int j = 0; j < i; ++j) sum -= A.t<T>(i,j) * x.t<T>(j, bCol); x.r<T>(i, bCol) = unitsOnDiag ? sum : sum / A.t<T>(i,i); } } } else { for (int bCol = 0; bCol < b.sizeAt(1); ++bCol) { for (int i = A.sizeAt(0) - 1; i >= 0; --i) { T sum = b.t<T>(i, bCol); for (int j = i + 1; j < A.sizeAt(1); ++j) sum -= A.t<T>(i,j) * x.t<T>(j, bCol); x.r<T>(i, bCol) = unitsOnDiag ? 
sum : sum / A.t<T>(i,i); } } } } } BUILD_SINGLE_TEMPLATE(template void triangularSolve2D, (sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output), FLOAT_TYPES); */ } } }
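// Editor's note (hedged reference sketch, not part of the original file): the recurrences that
// lowerTriangularSolve/upperTriangularSolve apply per TAD are ordinary forward and back
// substitution. The single-right-hand-side host versions below only make that recurrence
// explicit; the function names and std::vector storage are assumptions of this sketch.
#include <cstddef>
#include <vector>

// Forward substitution: x_i = (b_i - sum_{j<i} A[i][j] * x_j) / A[i][i]
static void solveLowerHost(const std::vector<double>& A, const std::vector<double>& b,
                           std::vector<double>& x, std::size_t n, bool unitsOnDiag) {
    for (std::size_t i = 0; i < n; ++i) {
        double sum = b[i];
        for (std::size_t j = 0; j < i; ++j)
            sum -= A[i * n + j] * x[j];
        x[i] = unitsOnDiag ? sum : sum / A[i * n + i];   // unitsOnDiag skips the division, as in the kernels
    }
}

// Back substitution: x_i = (b_i - sum_{j>i} A[i][j] * x_j) / A[i][i], solved from the last row up
static void solveUpperHost(const std::vector<double>& A, const std::vector<double>& b,
                           std::vector<double>& x, std::size_t n, bool unitsOnDiag) {
    for (std::size_t k = n; k > 0; --k) {
        const std::size_t i = k - 1;
        double sum = b[i];
        for (std::size_t j = i + 1; j < n; ++j)
            sum -= A[i * n + j] * x[j];
        x[i] = unitsOnDiag ? sum : sum / A[i * n + i];
    }
}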
640b0aeb43569755b20dcb49014d9e7acc48ed5a.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include "HOGEngine.h" #include "HOGUtils.h" #include "HOGConvolution.h" #define convKernelRadius 1 #define convKernelWidth (2 * convKernelRadius + 1) #define convKernelSize (convKernelWidth * sizeof(float)) #define convRowTileWidth 128 #define convKernelRadiusAligned 16 #define convColumnTileWidth 16 #define convColumnTileHeight 48 dim3 blockGridRows; dim3 blockGridColumns; dim3 threadBlockRows; dim3 threadBlockColumns; __device__ __constant__ float d_Kernel[convKernelWidth]; float h_Kernel[convKernelWidth]; float4 *convBuffer4; float1 *convBuffer1; int convWidth; int convHeight; int convBufferElements; bool convUseGrayscale; template<int i> __device__ float1 convolutionRow(float1 *data) { float1 val = data[convKernelRadius-i]; val.x *= d_Kernel[i]; val.x += convolutionRow<i-1>(data).x; return val; } template<> __device__ float1 convolutionRow<-1>(float1 *data) { float1 zero; zero.x = 0; return zero; } template<int i> __device__ float1 convolutionColumn(float1 *data) { float1 val = data[(convKernelRadius-i)*convColumnTileWidth]; val.x *= d_Kernel[i]; val.x += convolutionColumn<i-1>(data).x; return val; } template<> __device__ float1 convolutionColumn<-1>(float1 *data) { float1 zero; zero.x = 0; return zero; } template<int i> __device__ float4 convolutionRow(float4 *data) { float4 val = data[convKernelRadius-i]; val.x *= d_Kernel[i]; val.y *= d_Kernel[i]; val.z *= d_Kernel[i]; val.w *= d_Kernel[i]; float4 val2 = convolutionRow<i-1>(data); val.x += val2.x; val.y += val2.y; val.z += val2.z; val.w += val2.w; return val; } template<> __device__ float4 convolutionRow<-1>(float4 *data) { float4 zero; zero.x = 0; zero.y = 0; zero.z = 0; zero.w = 0; return zero; } template<int i> __device__ float4 convolutionColumn(float4 *data) { float4 val = data[(convKernelRadius-i) * convColumnTileWidth]; val.x *= d_Kernel[i]; val.y *= d_Kernel[i]; val.z *= d_Kernel[i]; val.w *= d_Kernel[i]; float4 val2 = convolutionColumn<i-1>(data); val.x += val2.x; val.y += val2.y; val.z += val2.z; val.w += val2.w; return val; } template<> __device__ float4 convolutionColumn<-1>(float4 *data) { float4 zero; zero.x = 0; zero.y = 0; zero.z = 0; zero.w = 0; return zero; } __global__ void convolutionRowGPU1(float1 *d_Result, float1 *d_Data, int dataW, int dataH) { float1 zero; zero.x = 0; const int rowStart = IMUL(blockIdx.y, dataW); __shared__ float1 data[convKernelRadius + convRowTileWidth + convKernelRadius]; const int tileStart = IMUL(blockIdx.x, convRowTileWidth); const int tileEnd = tileStart + convRowTileWidth - 1; const int apronStart = tileStart - convKernelRadius; const int apronEnd = tileEnd + convKernelRadius; const int tileEndClamped = min(tileEnd, dataW - 1); const int apronStartClamped = max(apronStart, 0); const int apronEndClamped = min(apronEnd, dataW - 1); const int apronStartAligned = tileStart - convKernelRadiusAligned; const int loadPos = apronStartAligned + threadIdx.x; if (loadPos >= apronStart) { const int smemPos = loadPos - apronStart; const bool inApron = (loadPos >= apronStartClamped) && (loadPos <= apronEndClamped); data[smemPos] = inApron ? 
d_Data[rowStart + loadPos] : zero; } __syncthreads(); const int writePos = tileStart + threadIdx.x; if (writePos <= tileEndClamped) { const int smemPos = writePos - apronStart; float1 sum = convolutionRow<2 * convKernelRadius>(data + smemPos); d_Result[rowStart + writePos] = sum; } } __global__ void convolutionRowGPU4(float4 *d_Result, float4 *d_Data, int dataW, int dataH) { float4 zero; zero.x = 0; zero.y = 0; zero.z = 0; zero.w = 0; const int rowStart = IMUL(blockIdx.y, dataW); __shared__ float4 data[convKernelRadius + convRowTileWidth + convKernelRadius]; const int tileStart = IMUL(blockIdx.x, convRowTileWidth); const int tileEnd = tileStart + convRowTileWidth - 1; const int apronStart = tileStart - convKernelRadius; const int apronEnd = tileEnd + convKernelRadius; const int tileEndClamped = min(tileEnd, dataW - 1); const int apronStartClamped = max(apronStart, 0); const int apronEndClamped = min(apronEnd, dataW - 1); const int apronStartAligned = tileStart - convKernelRadiusAligned; const int loadPos = apronStartAligned + threadIdx.x; if (loadPos >= apronStart) { const int smemPos = loadPos - apronStart; const bool inApron = (loadPos >= apronStartClamped) && (loadPos <= apronEndClamped); data[smemPos] = inApron ? d_Data[rowStart + loadPos] : zero; } __syncthreads(); const int writePos = tileStart + threadIdx.x; if (writePos <= tileEndClamped) { const int smemPos = writePos - apronStart; float4 sum = convolutionRow<2 * convKernelRadius>(data + smemPos); d_Result[rowStart + writePos] = sum; } } __global__ void convolutionColumnGPU1to2 ( float2 *d_Result, float1 *d_Data, float1 *d_DataRow, int dataW, int dataH, int smemStride, int gmemStride) { float1 rowValue; float1 zero; zero.x = 0; float2 result; const int columnStart = IMUL(blockIdx.x, convColumnTileWidth) + threadIdx.x; __shared__ float1 data[convColumnTileWidth * (convKernelRadius + convColumnTileHeight + convKernelRadius)]; const int tileStart = IMUL(blockIdx.y, convColumnTileHeight); const int tileEnd = tileStart + convColumnTileHeight - 1; const int apronStart = tileStart - convKernelRadius; const int apronEnd = tileEnd + convKernelRadius; const int tileEndClamped = min(tileEnd, dataH - 1); const int apronStartClamped = max(apronStart, 0); const int apronEndClamped = min(apronEnd, dataH - 1); int smemPos = IMUL(threadIdx.y, convColumnTileWidth) + threadIdx.x; int gmemPos = IMUL(apronStart + threadIdx.y, dataW) + columnStart; bool inApron; for (int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y) { inApron = (y >= apronStartClamped) && (y <= apronEndClamped); data[smemPos] = inApron ? 
d_Data[gmemPos] : zero; smemPos += smemStride; gmemPos += gmemStride; } __syncthreads(); smemPos = IMUL(threadIdx.y + convKernelRadius, convColumnTileWidth) + threadIdx.x; gmemPos = IMUL(tileStart + threadIdx.y , dataW) + columnStart; for(int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y) { float1 sum = convolutionColumn<2 * convKernelRadius>(data + smemPos); rowValue = d_DataRow[gmemPos]; result.x = sqrtf(sum.x * sum.x + rowValue.x * rowValue.x); result.y = atan2f(sum.x, rowValue.x) * RADTODEG; d_Result[gmemPos] = result; smemPos += smemStride; gmemPos += gmemStride; } } __global__ void convolutionColumnGPU4to2 ( float2 *d_Result, float4 *d_Data, float4 *d_DataRow, int dataW, int dataH, int smemStride, int gmemStride) { //float3 max12, mag4; float3 mag1, mag2, mag3; float3 max34, magMax; float2 result; float4 rowValue; float4 zero; zero.x = 0; zero.y = 0; zero.z = 0; zero.w = 0; const int columnStart = IMUL(blockIdx.x, convColumnTileWidth) + threadIdx.x; __shared__ float4 data[convColumnTileWidth * (convKernelRadius + convColumnTileHeight + convKernelRadius)]; const int tileStart = IMUL(blockIdx.y, convColumnTileHeight); const int tileEnd = tileStart + convColumnTileHeight - 1; const int apronStart = tileStart - convKernelRadius; const int apronEnd = tileEnd + convKernelRadius; const int tileEndClamped = min(tileEnd, dataH - 1); const int apronStartClamped = max(apronStart, 0); const int apronEndClamped = min(apronEnd, dataH - 1); int smemPos = IMUL(threadIdx.y, convColumnTileWidth) + threadIdx.x; int gmemPos = IMUL(apronStart + threadIdx.y, dataW) + columnStart; bool inApron; for (int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y) { inApron = (y >= apronStartClamped) && (y <= apronEndClamped); data[smemPos] = inApron ? d_Data[gmemPos] : zero; smemPos += smemStride; gmemPos += gmemStride; } __syncthreads(); smemPos = IMUL(threadIdx.y + convKernelRadius, convColumnTileWidth) + threadIdx.x; gmemPos = IMUL(tileStart + threadIdx.y , dataW) + columnStart; for (int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y) { float4 sum = convolutionColumn<2 * convKernelRadius>(data + smemPos); rowValue = d_DataRow[gmemPos]; mag1.x = sqrtf(sum.x * sum.x + rowValue.x * rowValue.x); mag1.y = sum.x; mag1.z = rowValue.x; mag2.x = sqrtf(sum.y * sum.y + rowValue.y * rowValue.y); mag2.y = sum.y; mag2.z = rowValue.y; mag3.x = sqrtf(sum.z * sum.z + rowValue.z * rowValue.z); mag3.y = sum.z; mag3.z = rowValue.z; max34 = (mag2.x > mag3.x) ? mag2 : mag3; magMax = (mag1.x > max34.x) ? 
mag1 : max34; result.x = magMax.x; result.y = atan2f(magMax.y, magMax.z); result.y = result.y * 180 / PI + 180; result.y = int(result.y) % 180; //TODO-> if semicerc d_Result[gmemPos] = result; smemPos += smemStride; gmemPos += gmemStride; } } void DeviceAllocHOGConvolutionMemory(void) { int elements = convBufferElements; if (convUseGrayscale) { checkCudaErrors(hipMalloc(&convBuffer1, sizeof(float1) * elements)); } else { checkCudaErrors(hipMalloc(&convBuffer4, sizeof(float4) * elements)); } } void CopyInHOGConvolution(void) { float h_Kernel[convKernelWidth]; h_Kernel[0] = 1.0f; h_Kernel[1] = 0; h_Kernel[2] = -1.0f; checkCudaErrors(hipMemcpyToSymbolAsync(d_Kernel, h_Kernel, convKernelSize, 0, hipMemcpyHostToDevice, stream)); checkCudaErrors(hipStreamSynchronize(stream)); } void DeviceFreeHOGConvolutionMemory(void) { if (convUseGrayscale) { checkCudaErrors(hipFree(convBuffer1)); convBuffer1 = NULL; } else { checkCudaErrors(hipFree(convBuffer4)); convBuffer4 = NULL; } } void InitConvolution(int width, int height, bool useGrayscale) { convUseGrayscale = useGrayscale; convBufferElements = width * height; } void SetConvolutionSize(int width, int height) { convWidth = width; convHeight = height; blockGridRows = dim3(iDivUp(convWidth, convRowTileWidth), convHeight); blockGridColumns = dim3(iDivUp(convWidth, convColumnTileWidth), iDivUp(convHeight, convColumnTileHeight)); threadBlockRows = dim3(convKernelRadiusAligned + convRowTileWidth + convKernelRadius); threadBlockColumns = dim3(convColumnTileWidth, 8); } void CloseConvolution() {} void ComputeColorGradients1to2(float1* inputImage, float2* outputImage) { hipLaunchKernelGGL(( convolutionRowGPU1), dim3(blockGridRows), dim3(threadBlockRows), 0, stream, convBuffer1, inputImage, convWidth, convHeight); checkCudaErrors(hipStreamSynchronize(stream)); hipLaunchKernelGGL(( convolutionColumnGPU1to2), dim3(blockGridColumns), dim3(threadBlockColumns), 0, stream, outputImage, inputImage, convBuffer1, convWidth, convHeight, convColumnTileWidth * threadBlockColumns.y, convWidth * threadBlockColumns.y); checkCudaErrors(hipStreamSynchronize(stream)); } void ComputeColorGradients4to2(float4* inputImage, float2* outputImage) { hipLaunchKernelGGL(( convolutionRowGPU4), dim3(blockGridRows), dim3(threadBlockRows), 0, stream, convBuffer4, inputImage, convWidth, convHeight); checkCudaErrors(hipStreamSynchronize(stream)); hipLaunchKernelGGL(( convolutionColumnGPU4to2), dim3(blockGridColumns), dim3(threadBlockColumns), 0, stream, outputImage, inputImage, convBuffer4, convWidth, convHeight, convColumnTileWidth * threadBlockColumns.y, convWidth * threadBlockColumns.y); checkCudaErrors(hipStreamSynchronize(stream)); }
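// Editor's note (hedged sketch, not part of the original file): stripped of the shared-memory
// tiling and apron logic, the grayscale path above (convolutionRowGPU1 followed by
// convolutionColumnGPU1to2) computes, per pixel, a central-difference gradient with the
// {1, 0, -1} kernel and then a magnitude/angle pair. Out-of-image reads are treated as zero,
// matching the clamped apron. RADTODEG comes from the included headers and is assumed here to
// be 180/pi; the function name below is illustrative only.
#include <cmath>
#include <vector>

static void colorGradients1to2Host(const std::vector<float>& img, int w, int h,
                                   std::vector<float>& mag, std::vector<float>& ang) {
    const float RADTODEG = 180.0f / 3.14159265f;          // assumption: mirrors HOGUtils
    auto at = [&](int x, int y) -> float {
        return (x < 0 || x >= w || y < 0 || y >= h) ? 0.0f : img[y * w + x];
    };
    for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) {
            float dx = at(x + 1, y) - at(x - 1, y);       // row pass, kernel {1, 0, -1}
            float dy = at(x, y + 1) - at(x, y - 1);       // column pass, same kernel
            mag[y * w + x] = std::sqrt(dx * dx + dy * dy);
            ang[y * w + x] = std::atan2(dy, dx) * RADTODEG;
        }
    }
}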
640b0aeb43569755b20dcb49014d9e7acc48ed5a.cu
#include <math.h> #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include "HOGEngine.h" #include "HOGUtils.h" #include "HOGConvolution.h" #define convKernelRadius 1 #define convKernelWidth (2 * convKernelRadius + 1) #define convKernelSize (convKernelWidth * sizeof(float)) #define convRowTileWidth 128 #define convKernelRadiusAligned 16 #define convColumnTileWidth 16 #define convColumnTileHeight 48 dim3 blockGridRows; dim3 blockGridColumns; dim3 threadBlockRows; dim3 threadBlockColumns; __device__ __constant__ float d_Kernel[convKernelWidth]; float h_Kernel[convKernelWidth]; float4 *convBuffer4; float1 *convBuffer1; int convWidth; int convHeight; int convBufferElements; bool convUseGrayscale; template<int i> __device__ float1 convolutionRow(float1 *data) { float1 val = data[convKernelRadius-i]; val.x *= d_Kernel[i]; val.x += convolutionRow<i-1>(data).x; return val; } template<> __device__ float1 convolutionRow<-1>(float1 *data) { float1 zero; zero.x = 0; return zero; } template<int i> __device__ float1 convolutionColumn(float1 *data) { float1 val = data[(convKernelRadius-i)*convColumnTileWidth]; val.x *= d_Kernel[i]; val.x += convolutionColumn<i-1>(data).x; return val; } template<> __device__ float1 convolutionColumn<-1>(float1 *data) { float1 zero; zero.x = 0; return zero; } template<int i> __device__ float4 convolutionRow(float4 *data) { float4 val = data[convKernelRadius-i]; val.x *= d_Kernel[i]; val.y *= d_Kernel[i]; val.z *= d_Kernel[i]; val.w *= d_Kernel[i]; float4 val2 = convolutionRow<i-1>(data); val.x += val2.x; val.y += val2.y; val.z += val2.z; val.w += val2.w; return val; } template<> __device__ float4 convolutionRow<-1>(float4 *data) { float4 zero; zero.x = 0; zero.y = 0; zero.z = 0; zero.w = 0; return zero; } template<int i> __device__ float4 convolutionColumn(float4 *data) { float4 val = data[(convKernelRadius-i) * convColumnTileWidth]; val.x *= d_Kernel[i]; val.y *= d_Kernel[i]; val.z *= d_Kernel[i]; val.w *= d_Kernel[i]; float4 val2 = convolutionColumn<i-1>(data); val.x += val2.x; val.y += val2.y; val.z += val2.z; val.w += val2.w; return val; } template<> __device__ float4 convolutionColumn<-1>(float4 *data) { float4 zero; zero.x = 0; zero.y = 0; zero.z = 0; zero.w = 0; return zero; } __global__ void convolutionRowGPU1(float1 *d_Result, float1 *d_Data, int dataW, int dataH) { float1 zero; zero.x = 0; const int rowStart = IMUL(blockIdx.y, dataW); __shared__ float1 data[convKernelRadius + convRowTileWidth + convKernelRadius]; const int tileStart = IMUL(blockIdx.x, convRowTileWidth); const int tileEnd = tileStart + convRowTileWidth - 1; const int apronStart = tileStart - convKernelRadius; const int apronEnd = tileEnd + convKernelRadius; const int tileEndClamped = min(tileEnd, dataW - 1); const int apronStartClamped = max(apronStart, 0); const int apronEndClamped = min(apronEnd, dataW - 1); const int apronStartAligned = tileStart - convKernelRadiusAligned; const int loadPos = apronStartAligned + threadIdx.x; if (loadPos >= apronStart) { const int smemPos = loadPos - apronStart; const bool inApron = (loadPos >= apronStartClamped) && (loadPos <= apronEndClamped); data[smemPos] = inApron ? 
d_Data[rowStart + loadPos] : zero; } __syncthreads(); const int writePos = tileStart + threadIdx.x; if (writePos <= tileEndClamped) { const int smemPos = writePos - apronStart; float1 sum = convolutionRow<2 * convKernelRadius>(data + smemPos); d_Result[rowStart + writePos] = sum; } } __global__ void convolutionRowGPU4(float4 *d_Result, float4 *d_Data, int dataW, int dataH) { float4 zero; zero.x = 0; zero.y = 0; zero.z = 0; zero.w = 0; const int rowStart = IMUL(blockIdx.y, dataW); __shared__ float4 data[convKernelRadius + convRowTileWidth + convKernelRadius]; const int tileStart = IMUL(blockIdx.x, convRowTileWidth); const int tileEnd = tileStart + convRowTileWidth - 1; const int apronStart = tileStart - convKernelRadius; const int apronEnd = tileEnd + convKernelRadius; const int tileEndClamped = min(tileEnd, dataW - 1); const int apronStartClamped = max(apronStart, 0); const int apronEndClamped = min(apronEnd, dataW - 1); const int apronStartAligned = tileStart - convKernelRadiusAligned; const int loadPos = apronStartAligned + threadIdx.x; if (loadPos >= apronStart) { const int smemPos = loadPos - apronStart; const bool inApron = (loadPos >= apronStartClamped) && (loadPos <= apronEndClamped); data[smemPos] = inApron ? d_Data[rowStart + loadPos] : zero; } __syncthreads(); const int writePos = tileStart + threadIdx.x; if (writePos <= tileEndClamped) { const int smemPos = writePos - apronStart; float4 sum = convolutionRow<2 * convKernelRadius>(data + smemPos); d_Result[rowStart + writePos] = sum; } } __global__ void convolutionColumnGPU1to2 ( float2 *d_Result, float1 *d_Data, float1 *d_DataRow, int dataW, int dataH, int smemStride, int gmemStride) { float1 rowValue; float1 zero; zero.x = 0; float2 result; const int columnStart = IMUL(blockIdx.x, convColumnTileWidth) + threadIdx.x; __shared__ float1 data[convColumnTileWidth * (convKernelRadius + convColumnTileHeight + convKernelRadius)]; const int tileStart = IMUL(blockIdx.y, convColumnTileHeight); const int tileEnd = tileStart + convColumnTileHeight - 1; const int apronStart = tileStart - convKernelRadius; const int apronEnd = tileEnd + convKernelRadius; const int tileEndClamped = min(tileEnd, dataH - 1); const int apronStartClamped = max(apronStart, 0); const int apronEndClamped = min(apronEnd, dataH - 1); int smemPos = IMUL(threadIdx.y, convColumnTileWidth) + threadIdx.x; int gmemPos = IMUL(apronStart + threadIdx.y, dataW) + columnStart; bool inApron; for (int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y) { inApron = (y >= apronStartClamped) && (y <= apronEndClamped); data[smemPos] = inApron ? 
d_Data[gmemPos] : zero; smemPos += smemStride; gmemPos += gmemStride; } __syncthreads(); smemPos = IMUL(threadIdx.y + convKernelRadius, convColumnTileWidth) + threadIdx.x; gmemPos = IMUL(tileStart + threadIdx.y , dataW) + columnStart; for(int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y) { float1 sum = convolutionColumn<2 * convKernelRadius>(data + smemPos); rowValue = d_DataRow[gmemPos]; result.x = sqrtf(sum.x * sum.x + rowValue.x * rowValue.x); result.y = atan2f(sum.x, rowValue.x) * RADTODEG; d_Result[gmemPos] = result; smemPos += smemStride; gmemPos += gmemStride; } } __global__ void convolutionColumnGPU4to2 ( float2 *d_Result, float4 *d_Data, float4 *d_DataRow, int dataW, int dataH, int smemStride, int gmemStride) { //float3 max12, mag4; float3 mag1, mag2, mag3; float3 max34, magMax; float2 result; float4 rowValue; float4 zero; zero.x = 0; zero.y = 0; zero.z = 0; zero.w = 0; const int columnStart = IMUL(blockIdx.x, convColumnTileWidth) + threadIdx.x; __shared__ float4 data[convColumnTileWidth * (convKernelRadius + convColumnTileHeight + convKernelRadius)]; const int tileStart = IMUL(blockIdx.y, convColumnTileHeight); const int tileEnd = tileStart + convColumnTileHeight - 1; const int apronStart = tileStart - convKernelRadius; const int apronEnd = tileEnd + convKernelRadius; const int tileEndClamped = min(tileEnd, dataH - 1); const int apronStartClamped = max(apronStart, 0); const int apronEndClamped = min(apronEnd, dataH - 1); int smemPos = IMUL(threadIdx.y, convColumnTileWidth) + threadIdx.x; int gmemPos = IMUL(apronStart + threadIdx.y, dataW) + columnStart; bool inApron; for (int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y) { inApron = (y >= apronStartClamped) && (y <= apronEndClamped); data[smemPos] = inApron ? d_Data[gmemPos] : zero; smemPos += smemStride; gmemPos += gmemStride; } __syncthreads(); smemPos = IMUL(threadIdx.y + convKernelRadius, convColumnTileWidth) + threadIdx.x; gmemPos = IMUL(tileStart + threadIdx.y , dataW) + columnStart; for (int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y) { float4 sum = convolutionColumn<2 * convKernelRadius>(data + smemPos); rowValue = d_DataRow[gmemPos]; mag1.x = sqrtf(sum.x * sum.x + rowValue.x * rowValue.x); mag1.y = sum.x; mag1.z = rowValue.x; mag2.x = sqrtf(sum.y * sum.y + rowValue.y * rowValue.y); mag2.y = sum.y; mag2.z = rowValue.y; mag3.x = sqrtf(sum.z * sum.z + rowValue.z * rowValue.z); mag3.y = sum.z; mag3.z = rowValue.z; max34 = (mag2.x > mag3.x) ? mag2 : mag3; magMax = (mag1.x > max34.x) ? 
mag1 : max34; result.x = magMax.x; result.y = atan2f(magMax.y, magMax.z); result.y = result.y * 180 / PI + 180; result.y = int(result.y) % 180; //TODO-> if semicerc d_Result[gmemPos] = result; smemPos += smemStride; gmemPos += gmemStride; } } void DeviceAllocHOGConvolutionMemory(void) { int elements = convBufferElements; if (convUseGrayscale) { checkCudaErrors(cudaMalloc(&convBuffer1, sizeof(float1) * elements)); } else { checkCudaErrors(cudaMalloc(&convBuffer4, sizeof(float4) * elements)); } } void CopyInHOGConvolution(void) { float h_Kernel[convKernelWidth]; h_Kernel[0] = 1.0f; h_Kernel[1] = 0; h_Kernel[2] = -1.0f; checkCudaErrors(cudaMemcpyToSymbolAsync(d_Kernel, h_Kernel, convKernelSize, 0, cudaMemcpyHostToDevice, stream)); checkCudaErrors(cudaStreamSynchronize(stream)); } void DeviceFreeHOGConvolutionMemory(void) { if (convUseGrayscale) { checkCudaErrors(cudaFree(convBuffer1)); convBuffer1 = NULL; } else { checkCudaErrors(cudaFree(convBuffer4)); convBuffer4 = NULL; } } void InitConvolution(int width, int height, bool useGrayscale) { convUseGrayscale = useGrayscale; convBufferElements = width * height; } void SetConvolutionSize(int width, int height) { convWidth = width; convHeight = height; blockGridRows = dim3(iDivUp(convWidth, convRowTileWidth), convHeight); blockGridColumns = dim3(iDivUp(convWidth, convColumnTileWidth), iDivUp(convHeight, convColumnTileHeight)); threadBlockRows = dim3(convKernelRadiusAligned + convRowTileWidth + convKernelRadius); threadBlockColumns = dim3(convColumnTileWidth, 8); } void CloseConvolution() {} void ComputeColorGradients1to2(float1* inputImage, float2* outputImage) { convolutionRowGPU1<<<blockGridRows, threadBlockRows, 0, stream>>>( convBuffer1, inputImage, convWidth, convHeight); checkCudaErrors(cudaStreamSynchronize(stream)); convolutionColumnGPU1to2<<<blockGridColumns, threadBlockColumns, 0, stream>>>(outputImage, inputImage, convBuffer1, convWidth, convHeight, convColumnTileWidth * threadBlockColumns.y, convWidth * threadBlockColumns.y); checkCudaErrors(cudaStreamSynchronize(stream)); } void ComputeColorGradients4to2(float4* inputImage, float2* outputImage) { convolutionRowGPU4<<<blockGridRows, threadBlockRows, 0, stream>>>( convBuffer4, inputImage, convWidth, convHeight); checkCudaErrors(cudaStreamSynchronize(stream)); convolutionColumnGPU4to2<<<blockGridColumns, threadBlockColumns, 0, stream>>>(outputImage, inputImage, convBuffer4, convWidth, convHeight, convColumnTileWidth * threadBlockColumns.y, convWidth * threadBlockColumns.y); checkCudaErrors(cudaStreamSynchronize(stream)); }
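// Editor's note (hedged sketch, not part of the original file): for color input,
// convolutionColumnGPU4to2 above keeps, per pixel, the gradient of the channel with the largest
// magnitude and folds its orientation into [0, 180) degrees (atan2 scaled to degrees, shifted by
// 180, then reduced modulo 180 as an integer). The helper below restates that selection on the
// host; the struct and function names are illustrative, the PI value is an assumption, and
// tie-breaking between equal channel magnitudes may differ trivially from the kernel.
#include <cmath>

struct PixelGrad { float magnitude; float orientation; };

static PixelGrad strongestChannelGradient(const float dx[3], const float dy[3]) {
    const float PI_F = 3.14159265f;                       // assumption: mirrors the PI used above
    PixelGrad best = { -1.0f, 0.0f };
    float bestDx = 0.0f, bestDy = 0.0f;
    for (int ch = 0; ch < 3; ++ch) {                      // the kernel ignores the w channel
        float m = std::sqrt(dx[ch] * dx[ch] + dy[ch] * dy[ch]);
        if (m > best.magnitude) { best.magnitude = m; bestDx = dx[ch]; bestDy = dy[ch]; }
    }
    float deg = std::atan2(bestDy, bestDx) * 180.0f / PI_F + 180.0f;   // [0, 360)
    best.orientation = static_cast<float>(static_cast<int>(deg) % 180);
    return best;
}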
5ed1d7381d261f40440f277ee4f2bc27d60df83e.hip
// !!! This is a file automatically generated by hipify!!!
#include "lab3_io.h"
#include "lab3_cuda.h"
#include <stdlib.h>
#include <omp.h>
#include <hip/hip_runtime.h>

/*
    Arguments:
        arg1: input filename (consists of M, N and D)
        arg2: retention (percentage of information to be retained by PCA)
*/
int main(int argc, char const *argv[])
{
    if (argc < 3){
        printf("\nLess Arguments\n");
        return 0;
    }

    if (argc > 3){
        printf("\nTOO many Arguments\n");
        return 0;
    }

    //---------------------------------------------------------------------
    int M;              // no of rows (samples) in input matrix D (input)
    int N;              // no of columns (features) in input matrix D (input)
    double* D;          // 1D array of M x N matrix to be reduced (input)
    double* U;          // 1D array of N x N matrix U (to be computed by SVD)
    double* SIGMA;      // 1D array of N x M diagonal matrix SIGMA (to be computed by SVD)
    double* V_T;        // 1D array of M x M matrix V_T (to be computed by SVD)
    int K;              // no of columns (features) in reduced matrix D_HAT (to be computed by PCA)
    double *D_HAT;      // 1D array of M x K reduced matrix (to be computed by PCA)
    int retention;      // percentage of information to be retained by PCA (command line input)
    //---------------------------------------------------------------------

    retention = atoi(argv[2]);  // retention = 90 means 90% of information should be retained

    float computation_time;

    /*
        -- Pre-defined function --
        reads the matrix and its dimensions from the input file and creates array D
        #elements in D is M * N
        format -
        --------------------------------------------------------------------------------------
        | D[0][0] | D[0][1] | ... | D[0][N-1] | D[1][0] | ... | D[1][N-1] | ... | D[M-1][N-1] |
        --------------------------------------------------------------------------------------
    */
    read_matrix (argv[1], &M, &N, &D);
    printf("M, N: %d %d\n", M, N);
    printf("Retention: %d\n", retention);
    // printf("Running\n");

    U = (double*) malloc(sizeof(double) * N*N);
    SIGMA = (double*) malloc(sizeof(double) * N);
    V_T = (double*) malloc(sizeof(double) * M*M);

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start);

    // /*
    //     *****************************************************
    //         TODO -- You must implement this function
    //     *****************************************************
    // */
    // for(int i=0;i<M;i++){
    //     for(int j=0;j<N;j++) printf("%f ", D[i*N+j]);
    //     printf("\n");
    // }
    SVD_and_PCA(M, N, D, &U, &SIGMA, &V_T, &D_HAT, &K, retention);

    hipEventRecord(stop);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&computation_time, start, stop);
    printf("Time taken: %f\n", computation_time);

    /*
        -- Pre-defined function --
        checks for correctness of results computed by SVD and PCA
        and outputs the results
    */
    write_result(M, N, D, U, SIGMA, V_T, K, D_HAT, computation_time);
    printf("Value of K: %d\n", K);

    return 0;
}
5ed1d7381d261f40440f277ee4f2bc27d60df83e.cu
#include "lab3_io.h"
#include "lab3_cuda.h"
#include <stdlib.h>
#include <omp.h>
#include <cuda.h>

/*
    Arguments:
        arg1: input filename (consists of M, N and D)
        arg2: retention (percentage of information to be retained by PCA)
*/
int main(int argc, char const *argv[])
{
    if (argc < 3){
        printf("\nLess Arguments\n");
        return 0;
    }

    if (argc > 3){
        printf("\nTOO many Arguments\n");
        return 0;
    }

    //---------------------------------------------------------------------
    int M;              // no of rows (samples) in input matrix D (input)
    int N;              // no of columns (features) in input matrix D (input)
    double* D;          // 1D array of M x N matrix to be reduced (input)
    double* U;          // 1D array of N x N matrix U (to be computed by SVD)
    double* SIGMA;      // 1D array of N x M diagonal matrix SIGMA (to be computed by SVD)
    double* V_T;        // 1D array of M x M matrix V_T (to be computed by SVD)
    int K;              // no of columns (features) in reduced matrix D_HAT (to be computed by PCA)
    double *D_HAT;      // 1D array of M x K reduced matrix (to be computed by PCA)
    int retention;      // percentage of information to be retained by PCA (command line input)
    //---------------------------------------------------------------------

    retention = atoi(argv[2]);  // retention = 90 means 90% of information should be retained

    float computation_time;

    /*
        -- Pre-defined function --
        reads the matrix and its dimensions from the input file and creates array D
        #elements in D is M * N
        format -
        --------------------------------------------------------------------------------------
        | D[0][0] | D[0][1] | ... | D[0][N-1] | D[1][0] | ... | D[1][N-1] | ... | D[M-1][N-1] |
        --------------------------------------------------------------------------------------
    */
    read_matrix (argv[1], &M, &N, &D);
    printf("M, N: %d %d\n", M, N);
    printf("Retention: %d\n", retention);
    // printf("Running\n");

    U = (double*) malloc(sizeof(double) * N*N);
    SIGMA = (double*) malloc(sizeof(double) * N);
    V_T = (double*) malloc(sizeof(double) * M*M);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    // /*
    //     *****************************************************
    //         TODO -- You must implement this function
    //     *****************************************************
    // */
    // for(int i=0;i<M;i++){
    //     for(int j=0;j<N;j++) printf("%f ", D[i*N+j]);
    //     printf("\n");
    // }
    SVD_and_PCA(M, N, D, &U, &SIGMA, &V_T, &D_HAT, &K, retention);

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&computation_time, start, stop);
    printf("Time taken: %f\n", computation_time);

    /*
        -- Pre-defined function --
        checks for correctness of results computed by SVD and PCA
        and outputs the results
    */
    write_result(M, N, D, U, SIGMA, V_T, K, D_HAT, computation_time);
    printf("Value of K: %d\n", K);

    return 0;
}
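// Editor's note (hedged illustration, not taken from the assignment's implementation):
// SVD_and_PCA receives `retention` and returns K, but its selection rule is not shown in this
// file. One common interpretation, assumed here purely for illustration, is to keep the smallest
// K whose cumulative squared singular values reach retention% of the total. The name pickK is
// hypothetical.
static int pickK(const double* sigma, int n, int retention) {
    double total = 0.0, running = 0.0;
    for (int i = 0; i < n; ++i)
        total += sigma[i] * sigma[i];                 // total "information" (explained variance)
    int k = 0;
    while (k < n && running * 100.0 < retention * total) {
        running += sigma[k] * sigma[k];               // add the next component's contribution
        ++k;
    }
    return k;                                         // smallest K reaching the requested retention
}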